diff --git a/Tribler/Category/Category.py b/Tribler/Category/Category.py
index 27bad8fe1aa..c339d362aaa 100644
--- a/Tribler/Category/Category.py
+++ b/Tribler/Category/Category.py
@@ -10,7 +10,7 @@ from Tribler.__init__ import LIBRARYNAME
 
-DEBUG=False
+DEBUG = False
 
 category_file = "category.conf"
 
@@ -24,10 +24,10 @@ def __init__(self, install_dir='.'):
         if Category.__single:
             raise RuntimeError, "Category is singleton"
-        filename = os.path.join(install_dir,LIBRARYNAME, 'Category', category_file)
+        filename = os.path.join(install_dir, LIBRARYNAME, 'Category', category_file)
         Category.__single = self
         self.utility = None
-        #self.torrent_db = TorrentDBHandler.getInstance() # Arno, 2009-01-30: apparently unused
+        # self.torrent_db = TorrentDBHandler.getInstance() # Arno, 2009-01-30: apparently unused
         try:
             self.category_info = getCategoryInfo(filename)
             self.category_info.sort(rankcmp)
@@ -38,9 +38,8 @@ def __init__(self, install_dir='.'):
 
         self.xxx_filter = XXXFilter(install_dir)
 
-        if DEBUG:
-            print >>sys.stderr,"category: Categories defined by user",self.getCategoryNames()
+        print >> sys.stderr, "category: Categories defined by user", self.getCategoryNames()
 
     # return Category instance
@@ -52,7 +51,7 @@ def getInstance(*args, **kw):
 
     def init_from_main(self, utility):
         self.utility = utility
-        self.set_family_filter(None) # init family filter to saved state
+        self.set_family_filter(None)  # init family filter to saved state
 
     def getCategoryKeys(self):
         if self.category_info is None:
@@ -65,7 +64,7 @@ def getCategoryKeys(self):
         keys.sort()
         return keys
 
-    def getCategoryNames(self, filter = True):
+    def getCategoryNames(self, filter=True):
         if self.category_info is None:
             return []
         keys = []
@@ -73,7 +72,7 @@ def getCategoryNames(self, filter = True):
             rank = category['rank']
             if rank == -1 and filter:
                 break
-            keys.append((category['name'],category['displayname']))
+            keys.append((category['name'], category['displayname']))
         return keys
 
     def hasActiveCategory(self, torrent):
@@ -82,16 +81,16 @@ def hasActiveCategory(self, torrent):
         except:
             print >> sys.stderr, 'Torrent: %s has no valid category' % `torrent['content_name']`
             return False
-        for category in [{'name':'other', 'rank':1}]+self.category_info:
+        for category in [{'name':'other', 'rank':1}] + self.category_info:
             rank = category['rank']
             if rank == -1:
                 break
             if name.lower() == category['name'].lower():
                 return True
-        #print >> sys.stderr, 'Category: %s was not in %s' % (name.lower(), [a['name'].lower() for a in self.category_info if a['rank'] != -1])
+        # print >> sys.stderr, 'Category: %s was not in %s' % (name.lower(), [a['name'].lower() for a in self.category_info if a['rank'] != -1])
         return False
 
-    def getCategoryRank(self,cat):
+    def getCategoryRank(self, cat):
         for category in self.category_info:
             if category['name'] == cat:
                 return category['rank']
@@ -111,11 +110,11 @@ def calculateCategory(self, torrent_dict, display_name):
                     files_list.append((ifiles['path'][-1], ifiles['length'] / float(self.__size_change)))
             except KeyError:
                 # single mode
-                files_list.append((torrent_dict['info']["name"],torrent_dict['info']['length'] / float(self.__size_change)))
+                files_list.append((torrent_dict['info']["name"], torrent_dict['info']['length'] / float(self.__size_change)))
 
         tracker = torrent_dict.get('announce')
         if not tracker:
-            tracker = torrent_dict.get('announce-list',[['']])[0][0]
+            tracker = torrent_dict.get('announce-list', [['']])[0][0]
 
         comment = torrent_dict.get('comment')
         return self.calculateCategoryNonDict(files_list, display_name, tracker, comment)
@@ -134,7 +133,7 @@ def calculateCategoryNonDict(self, files_list, display_name, tracker, comment):
         torrent_category = None
         # filename_list ready
         strongest_cat = 0.0
-        for category in self.category_info: # for each category
+        for category in self.category_info:  # for each category
             (decision, strength) = self.judge(category, files_list, display_name)
             if decision and (strength > strongest_cat):
                 torrent_category = [category['name']]
@@ -147,7 +146,7 @@ def calculateCategoryNonDict(self, files_list, display_name, tracker, comment):
 
     # judge whether a torrent file belongs to a certain category
     # return bool
-    def judge(self, category, files_list, display_name = ''):
+    def judge(self, category, files_list, display_name=''):
 
         # judge file keywords
         display_name = display_name.lower()
@@ -164,7 +163,7 @@ def judge(self, category, files_list, display_name = ''):
             if 'strength' in category:
                 return (True, category['strength'])
             else:
-                return (True, (1- factor))
+                return (True, (1 - factor))
 
         # judge each file
         matchSize = 0
@@ -172,14 +171,14 @@ def judge(self, category, files_list, display_name = ''):
         for name, length in files_list:
             totalSize += length
             # judge file size
-            if ( length < category['minfilesize'] ) or \
-                (category['maxfilesize'] > 0 and length > category['maxfilesize'] ):
+            if (length < category['minfilesize']) or \
+                (category['maxfilesize'] > 0 and length > category['maxfilesize']):
                 continue
 
             # judge file suffix
             OK = False
             for isuffix in category['suffix']:
-                if name.lower().endswith( isuffix ):
+                if name.lower().endswith(isuffix):
                     OK = True
                     break
             if OK:
@@ -194,7 +193,7 @@ def judge(self, category, files_list, display_name = ''):
#                pass
                 try:
                     fileKeywords.index(ikeywords)
-                    #print ikeywords
+                    # print ikeywords
                     factor *= 1 - category['keywords'][ikeywords]
                 except:
                     pass
@@ -207,7 +206,7 @@ def judge(self, category, files_list, display_name = ''):
             if 'strength' in category:
                 return (True, category['strength'])
             else:
-                return (True, (matchSize/ totalSize))
+                return (True, (matchSize / totalSize))
 
         return (False, 0)
 
@@ -234,12 +233,12 @@ def family_filter_enabled(self):
     def set_family_filter(self, b=None):
         assert b in (True, False, None)
         old = self.family_filter_enabled()
-        if b != old or b is None: # update category data if initial call, or if state changes
+        if b != old or b is None:  # update category data if initial call, or if state changes
             if b is None:
-                b=old
+                b = old
             if self.utility is None:
                 return
-            #print >> sys.stderr , b
+            # print >> sys.stderr , b
             if b:
                 self.utility.config.Write('family_filter', '1')
             else:
@@ -260,7 +259,7 @@ def get_family_filter_sql(self, _getCategoryID, table_name=''):
         if self.family_filter_enabled():
             forbiddencats = [cat['name'] for cat in self.category_info if cat['rank'] == -1]
             if table_name:
-                table_name+='.'
+                table_name += '.'
             if forbiddencats:
                 return " and %scategory_id not in (%s)" % (table_name, ','.join([str(_getCategoryID([cat])) for cat in forbiddencats]))
         return ''
@@ -268,7 +267,7 @@ def get_family_filter_sql(self, _getCategoryID, table_name=''):
 
-def rankcmp(a,b):
+def rankcmp(a, b):
     if not ('rank' in a):
         return 1
     elif not ('rank' in b):
diff --git a/Tribler/Core/CacheDB/sqlitecachedb.py b/Tribler/Core/CacheDB/sqlitecachedb.py
index 134a6f75f3d..a8c158f75dc 100644
--- a/Tribler/Core/CacheDB/sqlitecachedb.py
+++ b/Tribler/Core/CacheDB/sqlitecachedb.py
@@ -26,27 +26,27 @@
 except ImportError:
     from Tribler.dispersy.python27_ordereddict import OrderedDict
 
-#support_version = (3,5,9)
-#support_version = (3,3,13)
-#apsw_version = tuple([int(r) for r in apsw.apswversion().split('-')[0].split('.')])
-##print apsw_version
-#assert apsw_version >= support_version, "Required APSW Version >= %d.%d.%d."%support_version + " But your version is %d.%d.%d.\n"%apsw_version + \
+# support_version = (3,5,9)
+# support_version = (3,3,13)
+# apsw_version = tuple([int(r) for r in apsw.apswversion().split('-')[0].split('.')])
+# #print apsw_version
+# assert apsw_version >= support_version, "Required APSW Version >= %d.%d.%d."%support_version + " But your version is %d.%d.%d.\n"%apsw_version + \
 #    "Please download and install it from http://code.google.com/p/apsw/"
 
-##Changed from 4 to 5 by andrea for subtitles support
-##Changed from 5 to 6 by George Milescu for ProxyService
-##Changed from 6 to 7 for Raynor's TermFrequency table
-##Changed from 7 to 8 for Raynor's BundlerPreference table
-##Changed from 8 to 9 for Niels's Open2Edit tables
-##Changed from 9 to 10 for Fix in Open2Edit PlayListTorrent table
-##Changed from 10 to 11 add a index on channeltorrent.torrent_id to improve search performance
-##Changed from 11 to 12 imposing some limits on the Tribler database
-##Changed from 12 to 13 introduced swift-url modification type
-##Changed from 13 to 14 introduced swift_hash/swift_torrent_hash torrent columns + upgrade script
-##Changed from 14 to 15 added indices on swift_hash/swift_torrent_hash torrent
-##Changed from 15 to 16 changed all swift_torrent_hash that was an empty string to NULL
-##Changed from 16 to 17 cleaning buddycast, preference, terms, and subtitles tables, removed indices
-##Changed from 17 to 18 added swift-thumbnails/video-info metadatatypes
+# #Changed from 4 to 5 by andrea for subtitles support
+# #Changed from 5 to 6 by George Milescu for ProxyService
+# #Changed from 6 to 7 for Raynor's TermFrequency table
+# #Changed from 7 to 8 for Raynor's BundlerPreference table
+# #Changed from 8 to 9 for Niels's Open2Edit tables
+# #Changed from 9 to 10 for Fix in Open2Edit PlayListTorrent table
+# #Changed from 10 to 11 add a index on channeltorrent.torrent_id to improve search performance
+# #Changed from 11 to 12 imposing some limits on the Tribler database
+# #Changed from 12 to 13 introduced swift-url modification type
+# #Changed from 13 to 14 introduced swift_hash/swift_torrent_hash torrent columns + upgrade script
+# #Changed from 14 to 15 added indices on swift_hash/swift_torrent_hash torrent
+# #Changed from 15 to 16 changed all swift_torrent_hash that was an empty string to NULL
+# #Changed from 16 to 17 cleaning buddycast, preference, terms, and subtitles tables, removed indices
+# #Changed from 17 to 18 added swift-thumbnails/video-info metadatatypes
 
 # Arno, 2012-08-01: WARNING You must also update the version number that is
 # written to the DB in the schema_sdb_v*.sql file!!!
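The version history above is enforced at startup: checkDB() (further down in this file) compares the version recorded in the database against CURRENT_MAIN_DB_VERSION and, when they differ, walks updateDB(fromver, tover) through every missed step. A minimal sketch of that gate — assuming a MyInfo(entry, value) table, which readDBVersion() appears to read (its body is not shown in this patch); upgrade_step() is a hypothetical stand-in for the "if fromver < N:" blocks of updateDB():

    import sqlite3

    CURRENT_MAIN_DB_VERSION = 18  # latest version in the history above

    def upgrade_step(conn, fromver):
        # hypothetical stand-in for one "if fromver < N:" block of updateDB()
        pass

    def check_db(conn):
        row = conn.execute("SELECT value FROM MyInfo WHERE entry = 'version'").fetchone()
        db_ver = int(row[0]) if row else 0
        for ver in range(db_ver, CURRENT_MAIN_DB_VERSION):
            upgrade_step(conn, ver)  # replay every missed bump, one version at a time
        conn.execute("UPDATE MyInfo SET value = ? WHERE entry = 'version'",
                     (str(CURRENT_MAIN_DB_VERSION),))
        conn.commit()

    conn = sqlite3.connect(":memory:")  # stand-in for sqlite/tribler.sdb
    conn.execute("CREATE TABLE MyInfo (entry TEXT PRIMARY KEY, value TEXT)")
    conn.execute("INSERT INTO MyInfo VALUES ('version', '13')")
    check_db(conn)  # replays 13..17, stamps 18

This is also why the Arno warning matters: the same number lives in three places (the constant, the shipped schema_sdb_v*.sql, and the upgrade steps), and they must move together.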
@@ -54,11 +54,11 @@
 TEST_SQLITECACHEDB_UPGRADE = False
 
 CREATE_SQL_FILE = None
-CREATE_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'schema_sdb_v'+str(CURRENT_MAIN_DB_VERSION)+'.sql')
+CREATE_SQL_FILE_POSTFIX = os.path.join(LIBRARYNAME, 'schema_sdb_v' + str(CURRENT_MAIN_DB_VERSION) + '.sql')
 DB_FILE_NAME = 'tribler.sdb'
-DB_DIR_NAME = 'sqlite' # db file path = DB_DIR_NAME/DB_FILE_NAME
+DB_DIR_NAME = 'sqlite'  # db file path = DB_DIR_NAME/DB_FILE_NAME
 DEFAULT_BUSY_TIMEOUT = 10000
-MAX_SQL_BATCHED_TO_TRANSACTION = 1000 # don't change it unless carefully tested. A transaction with 1000 batched updates took 1.5 seconds
+MAX_SQL_BATCHED_TO_TRANSACTION = 1000  # don't change it unless carefully tested. A transaction with 1000 batched updates took 1.5 seconds
 NULL = None
 icon_dir = None
 SHOW_ALL_EXECUTE = False
@@ -66,6 +66,7 @@
 cost_reads = []
 torrent_dir = None
 config_dir = None
+install_dir = None
 
 TEST_OVERRIDE = False
 DEBUG = False
@@ -80,9 +81,9 @@
     from random import randint
     from os.path import exists
     from time import time
-    DB_DEBUG_FILE="tribler_database_queries_%d.txt" % randint(1,9999999)
+    DB_DEBUG_FILE = "tribler_database_queries_%d.txt" % randint(1, 9999999)
     while exists(DB_DEBUG_FILE):
-        DB_DEBUG_FILE="tribler_database_queries_%d.txt" % randint(1,9999999)
+        DB_DEBUG_FILE = "tribler_database_queries_%d.txt" % randint(1, 9999999)
 
 
 class Warning(Exception):
@@ -98,20 +99,21 @@ def __setitem__(self, *args, **kargs):
         if len(self) > self._limit:
             self.popitem(last=False)
 
-def init(config, db_exception_handler = None):
+def init(config, db_exception_handler=None):
     """ create sqlite database """
     global CREATE_SQL_FILE
     global icon_dir
     global torrent_dir
     global config_dir
+    global install_dir
     torrent_dir = os.path.abspath(config['torrent_collecting_dir'])
     config_dir = config['state_dir']
     install_dir = config['install_dir']
-    CREATE_SQL_FILE = os.path.join(install_dir,CREATE_SQL_FILE_POSTFIX)
+    CREATE_SQL_FILE = os.path.join(install_dir, CREATE_SQL_FILE_POSTFIX)
     sqlitedb = SQLiteCacheDB.getInstance(db_exception_handler)
 
     sqlite_db_path = os.path.join(config_dir, DB_DIR_NAME, DB_FILE_NAME)
-    print >>sys.stderr,"cachedb: init: SQL FILE",sqlite_db_path
+    print >> sys.stderr, "cachedb: init: SQL FILE", sqlite_db_path
 
     icon_dir = os.path.abspath(config['peer_icon_path'])
 
@@ -125,15 +127,15 @@ def done():
 #    SQLiteCacheDB.getInstance().close()
 
-def make_filename(config_dir,filename):
+def make_filename(config_dir, filename):
     if config_dir is None:
         return filename
     else:
-        return os.path.join(config_dir,filename)
+        return os.path.join(config_dir, filename)
 
 def bin2str(bin):
     # Full BASE64-encoded
-    return encodestring(bin).replace("\n","")
+    return encodestring(bin).replace("\n", "")
 
 def str2bin(str):
     return decodestring(str)
@@ -163,16 +165,16 @@ def print_exc_plus():
                                              frame.f_lineno)
         for key, value in frame.f_locals.items():
             print >> sys.stderr, "\t%20s = " % key,
-            #We have to be careful not to cause a new error in our error
-            #printer! Calling str() on an unknown object could cause an
-            #error we don't want.
+            # We have to be careful not to cause a new error in our error
+            # printer! Calling str() on an unknown object could cause an
+            # error we don't want.
             try:
                 print >> sys.stderr, value
             except:
                 print >> sys.stderr, ""
 
 def debugTime(func):
-    def invoke_func(*args,**kwargs):
+    def invoke_func(*args, **kwargs):
         if DEBUG_TIME:
             t1 = time()
 
@@ -231,20 +233,20 @@ def values(self):
 class SQLiteCacheDBBase:
     lock = threading.RLock()
 
-    def __init__(self,db_exception_handler=None):
+    def __init__(self, db_exception_handler=None):
         self.exception_handler = db_exception_handler
-        self.cursor_table = safe_dict() # {thread_name:cur}
-        self.cache_transaction_table = safe_dict() # {thread_name:[sql]
-        self.class_variables = safe_dict({'db_path':None,'busytimeout':None}) # busytimeout is in milliseconds
+        self.cursor_table = safe_dict()  # {thread_name:cur}
+        self.cache_transaction_table = safe_dict()  # {thread_name:[sql]
+        self.class_variables = safe_dict({'db_path':None, 'busytimeout':None})  # busytimeout is in milliseconds
 
         # Arno, 2012-08-02: As there is just Dispersy thread here, removing
         # safe_dict() here
         # 24/09/12 Boudewijn: changed into LimitedOrderedDict to limit memory consumption
-        self.permid_id = LimitedOrderedDict(1024*5) # {} # safe_dict()
-        self.infohash_id = LimitedOrderedDict(1024*5) # {} # safe_dict()
+        self.permid_id = LimitedOrderedDict(1024 * 5)  # {} # safe_dict()
+        self.infohash_id = LimitedOrderedDict(1024 * 5)  # {} # safe_dict()
         self.show_execute = False
 
-        #TODO: All global variables must be protected to be thread safe?
+        # TODO: All global variables must be protected to be thread safe?
         self.status_table = None
         self.category_table = None
         self.src_table = None
@@ -270,13 +272,13 @@ def close(self, clean=False):
                 del self.cache_transaction_table[thread_name]
         except:
             print_exc()
-        if clean: # used for test suite
+        if clean:  # used for test suite
             # Arno, 2012-08-02: As there is just Dispery thread here, removing
             # safe_dict() here
-            self.permid_id = {} # safe_dict()
-            self.infohash_id = {} # safe_dict()
+            self.permid_id = {}  # safe_dict()
+            self.infohash_id = {}  # safe_dict()
             self.exception_handler = None
-            self.class_variables = safe_dict({'db_path':None,'busytimeout':None})
+            self.class_variables = safe_dict({'db_path':None, 'busytimeout':None})
             self.cursor_table = safe_dict()
             self.cache_transaction_table = safe_dict()
 
@@ -285,10 +287,10 @@ def close(self, clean=False):
     def getCursor(self, create=True):
         thread_name = threading.currentThread().getName()
         curs = self.cursor_table
-        cur = curs.get(thread_name, None) # return [cur, cur, lib] or None
-        #print >> sys.stderr, '-------------- getCursor::', len(curs), time(), curs.keys()
+        cur = curs.get(thread_name, None)  # return [cur, cur, lib] or None
+        # print >> sys.stderr, '-------------- getCursor::', len(curs), time(), curs.keys()
         if cur is None and create and self.class_variables['db_path']:
-            self.openDB(self.class_variables['db_path'], self.class_variables['busytimeout']) # create a new db obj for this thread
+            self.openDB(self.class_variables['db_path'], self.class_variables['busytimeout'])  # create a new db obj for this thread
             cur = curs.get(thread_name)
         return cur
 
@@ -303,15 +305,15 @@ def openDB(self, dbfile_path=None, busytimeout=DEFAULT_BUSY_TIMEOUT):
 
         # already opened a db in this thread, reuse it
         thread_name = threading.currentThread().getName()
-        #print >>sys.stderr,"sqlcachedb: openDB",dbfile_path,thread_name
+        # print >>sys.stderr,"sqlcachedb: openDB",dbfile_path,thread_name
         if thread_name in self.cursor_table:
-            #assert dbfile_path == None or self.class_variables['db_path'] == dbfile_path
+            # assert dbfile_path == None or self.class_variables['db_path'] == dbfile_path
            return self.cursor_table[thread_name]
 
         assert dbfile_path, "You must specify the path of database file"
 
         if dbfile_path.lower() != ':memory:':
-            db_dir,db_filename = os.path.split(dbfile_path)
+            db_dir, db_filename = os.path.split(dbfile_path)
             if db_dir and not os.path.isdir(db_dir):
                 os.makedirs(db_dir)
 
@@ -345,8 +347,8 @@ def openDB(self, dbfile_path=None, busytimeout=DEFAULT_BUSY_TIMEOUT):
         cur.execute("PRAGMA synchronous = NORMAL;")
         cur.execute("PRAGMA cache_size = 10000;")
 
-        #Niels 19-09-2012: even though my database upgraded to increase the pagesize it did not keep wal mode?
-        #Enabling WAL on every starup
+        # Niels 19-09-2012: even though my database upgraded to increase the pagesize it did not keep wal mode?
+        # Enabling WAL on every startup
         cur.execute("PRAGMA journal_mode = WAL;")
 
         return cur
@@ -365,10 +367,10 @@ def createDBTable(self, sql_create_table, dbfile_path, busytimeout=DEFAULT_BUSY_
         cur.execute(sql_create_table)  # it is suggested to include begin & commit in the script
 
     def initDB(self, sqlite_filepath,
-               create_sql_filename = None,
-               busytimeout = DEFAULT_BUSY_TIMEOUT,
-               check_version = True,
-               current_db_version = CURRENT_MAIN_DB_VERSION):
+               create_sql_filename=None,
+               busytimeout=DEFAULT_BUSY_TIMEOUT,
+               check_version=True,
+               current_db_version=CURRENT_MAIN_DB_VERSION):
         """ Create and initialize a SQLite database given a sql script.
         Only one db can be opened. If the given dbfile_path is different with the opened DB file, warn and exit
@@ -379,30 +381,30 @@ def initDB(self, sqlite_filepath,
         if failed to acquire a lock. Default = 5000 milliseconds
         """
         if create_sql_filename is None:
-            create_sql_filename=CREATE_SQL_FILE
+            create_sql_filename = CREATE_SQL_FILE
         try:
             self.lock.acquire()
 
             # verify db path identity
             class_db_path = self.class_variables['db_path']
-            if sqlite_filepath is None: # reuse the opened db file?
-                if class_db_path is not None: # yes, reuse it
+            if sqlite_filepath is None:  # reuse the opened db file?
+                if class_db_path is not None:  # yes, reuse it
                     # reuse the busytimeout
                     return self.openDB(class_db_path, self.class_variables['busytimeout'])
-                else: # no db file opened
+                else:  # no db file opened
                     raise Exception, "You must specify the path of database file when open it at the first time"
             else:
-                if class_db_path is None: # the first time to open db path, store it
-                    #print 'quit now'
-                    #sys.exit(0)
+                if class_db_path is None:  # the first time to open db path, store it
+
+                    # print 'quit now'
+                    # sys.exit(0)
                     # open the db if it exists (by converting from bsd) and is not broken, otherwise create a new one
                     # it will update the db if necessary by checking the version number
                     self.safelyOpenTriblerDB(sqlite_filepath, create_sql_filename, busytimeout, check_version=check_version, current_db_version=current_db_version)
 
                     self.class_variables = {'db_path': sqlite_filepath, 'busytimeout': int(busytimeout)}
 
-                    return self.openDB() # return the cursor, won't reopen the db
+                    return self.openDB()  # return the cursor, won't reopen the db
 
                 elif sqlite_filepath != class_db_path:  # not the first time to open db path, check if it is the same
                    raise Exception, "Only one database file can be opened. You have opened %s and are trying to open %s." % (class_db_path, sqlite_filepath)
 
@@ -445,12 +447,12 @@ def safelyOpenTriblerDB(self, dbfile_path, sql_create, busytimeout=DEFAULT_BUSY_
             cur = self.openDB(dbfile_path, busytimeout)
             if check_version:
                 sqlite_db_version = self.readDBVersion()
-                if sqlite_db_version == NULL or int(sqlite_db_version)<1:
+                if sqlite_db_version == NULL or int(sqlite_db_version) < 1:
                     raise NotImplementedError
         except Exception, exception:
             if isinstance(exception, Warning):
                 # user friendly warning to log the creation of a new database
-                print >>sys.stderr, exception
+                print >> sys.stderr, exception
 
             else:
                 # user unfriendly exception message because something went wrong
@@ -477,17 +479,17 @@ def safelyOpenTriblerDB(self, dbfile_path, sql_create, busytimeout=DEFAULT_BUSY_
     def checkDB(self, db_ver, curr_ver):
         # read MyDB and check the version number.
         if not db_ver or not curr_ver:
-            self.updateDB(db_ver,curr_ver)
+            self.updateDB(db_ver, curr_ver)
             return
         db_ver = int(db_ver)
         curr_ver = int(curr_ver)
-        #print "check db", db_ver, curr_ver
+        # print "check db", db_ver, curr_ver
         if db_ver != curr_ver or \
           (not config_dir is None and (os.path.exists(os.path.join(config_dir, "upgradingdb.txt")) or os.path.exists(os.path.join(config_dir, "upgradingdb2.txt")) or os.path.exists(os.path.join(config_dir, "upgradingdb3.txt")))):
-            self.updateDB(db_ver,curr_ver)
+            self.updateDB(db_ver, curr_ver)
 
-    def updateDB(self,db_ver,curr_ver):
-        pass #TODO
+    def updateDB(self, db_ver, curr_ver):
+        pass  # TODO
 
     def readDBVersion(self):
         cur = self.getCursor()
@@ -524,12 +526,12 @@ def _execute(self, sql, args=None):
             if threading.currentThread().getName() == "MainThread":
                 for sql_line in sql.split(";"):
                     try:
-                        #key, rest = sql_line.strip().split(" ", 1)
+                        # key, rest = sql_line.strip().split(" ", 1)
                         key = sql_line[:50]
                         print >> sys.stderr, "sqlitecachedb.py: should not perform sql", key, "on GUI thread"
                         # print_stack()
                     except:
-                        #key = sql.strip()
+                        # key = sql.strip()
                         key = sql_line
                         if key:
                             print >> sys.stderr, "sqlitecachedb.py: should not perform sql", key, "on GUI thread"
@@ -553,7 +555,7 @@ def _execute(self, sql, args=None):
                 thread_name = threading.currentThread().getName()
                 print >> sys.stderr, '===', thread_name, '===\nSQL Type:', type(sql), '\n-----\n', sql, '\n-----\n', args, '\n======\n'
 
-            #return None
+            # return None
             # ARNODB: this is incorrect, it should reraise the exception
             # such that _transaction can rollback or recommit.
             # This bug already reported by Johan
@@ -595,11 +597,11 @@ def transaction(self, sql=None, args=None):
         n = 0
         sql_full = ''
         arg_list = []
-        sql_queue = self.cache_transaction_table.get(thread_name,None)
+        sql_queue = self.cache_transaction_table.get(thread_name, None)
         if sql_queue:
             while True:
                 try:
-                    _sql,_args = sql_queue.pop(0)
+                    _sql, _args = sql_queue.pop(0)
                 except IndexError:
                     break
 
@@ -627,10 +629,10 @@ def _transaction(self, sql, args=None):
             sql = 'BEGIN TRANSACTION; \n' + sql + 'COMMIT TRANSACTION;'
             try:
                 self._execute(sql, args)
-            except Exception,e:
-                self.commit_retry_if_busy_or_rollback(e,0,sql=sql)
+            except Exception, e:
+                self.commit_retry_if_busy_or_rollback(e, 0, sql=sql)
 
-    def commit_retry_if_busy_or_rollback(self,e,tries,sql=None):
+    def commit_retry_if_busy_or_rollback(self, e, tries, sql=None):
         """
         Arno:
         SQL_BUSY errors happen at the beginning of the experiment,
@@ -638,23 +640,23 @@
        when not much data is in the DB and the busytimeout
        is not honoured for some reason. After the initial errors,
        they no longer occur.
""" - print >>sys.stderr,"sqlcachedb: commit_retry: after",str(e),repr(sql) + print >> sys.stderr, "sqlcachedb: commit_retry: after", str(e), repr(sql) if str(e).startswith("BusyError"): try: self._execute("COMMIT") - except Exception,e2: - if tries < 5: #self.max_commit_retries + except Exception, e2: + if tries < 5: # self.max_commit_retries # Spec is unclear whether next commit will also has # 'busytimeout' seconds to try to get a write lock. - sleep(pow(2.0,tries+2)/100.0) - self.commit_retry_if_busy_or_rollback(e2,tries+1) + sleep(pow(2.0, tries + 2) / 100.0) + self.commit_retry_if_busy_or_rollback(e2, tries + 1) else: self.rollback(tries) - raise Exception,e2 + raise Exception, e2 else: self.rollback(tries) - m = "cachedb: TRANSACTION ERROR "+threading.currentThread().getName()+' '+str(e) + m = "cachedb: TRANSACTION ERROR " + threading.currentThread().getName() + ' ' + str(e) raise Exception, m @@ -666,52 +668,52 @@ def rollback(self, tries): # May be harmless, see above. Unfortunately they don't specify # what the error is when an attempt is made to roll back # an automatically rolled back transaction. - m = "cachedb: ROLLBACK ERROR "+threading.currentThread().getName()+' '+str(e) - #print >> sys.stderr, 'SQLite Database', m + m = "cachedb: ROLLBACK ERROR " + threading.currentThread().getName() + ' ' + str(e) + # print >> sys.stderr, 'SQLite Database', m raise Exception, m # -------- Write Operations -------- def insert_or_replace(self, table_name, commit=True, **argv): if len(argv) == 1: - sql = 'INSERT OR REPLACE INTO %s (%s) VALUES (?);'%(table_name, argv.keys()[0]) + sql = 'INSERT OR REPLACE INTO %s (%s) VALUES (?);' % (table_name, argv.keys()[0]) else: - questions = '?,'*len(argv) - sql = 'INSERT OR REPLACE INTO %s %s VALUES (%s);'%(table_name, tuple(argv.keys()), questions[:-1]) + questions = '?,' * len(argv) + sql = 'INSERT OR REPLACE INTO %s %s VALUES (%s);' % (table_name, tuple(argv.keys()), questions[:-1]) self.execute_write(sql, argv.values(), commit) def insert_or_ignore(self, table_name, commit=True, **argv): if len(argv) == 1: - sql = 'INSERT OR IGNORE INTO %s (%s) VALUES (?);'%(table_name, argv.keys()[0]) + sql = 'INSERT OR IGNORE INTO %s (%s) VALUES (?);' % (table_name, argv.keys()[0]) else: - questions = '?,'*len(argv) - sql = 'INSERT OR IGNORE INTO %s %s VALUES (%s);'%(table_name, tuple(argv.keys()), questions[:-1]) + questions = '?,' * len(argv) + sql = 'INSERT OR IGNORE INTO %s %s VALUES (%s);' % (table_name, tuple(argv.keys()), questions[:-1]) self.execute_write(sql, argv.values(), commit) def insert(self, table_name, commit=True, **argv): if len(argv) == 1: - sql = 'INSERT INTO %s (%s) VALUES (?);'%(table_name, argv.keys()[0]) + sql = 'INSERT INTO %s (%s) VALUES (?);' % (table_name, argv.keys()[0]) else: - questions = '?,'*len(argv) - sql = 'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(argv.keys()), questions[:-1]) + questions = '?,' * len(argv) + sql = 'INSERT INTO %s %s VALUES (%s);' % (table_name, tuple(argv.keys()), questions[:-1]) self.execute_write(sql, argv.values(), commit) def insertMany(self, table_name, values, keys=None, commit=True): """ values must be a list of tuples """ - questions = u'?,'*len(values[0]) + questions = u'?,' * len(values[0]) if keys is None: - sql = u'INSERT INTO %s VALUES (%s);'%(table_name, questions[:-1]) + sql = u'INSERT INTO %s VALUES (%s);' % (table_name, questions[:-1]) else: - sql = u'INSERT INTO %s %s VALUES (%s);'%(table_name, tuple(keys), questions[:-1]) + sql = u'INSERT INTO %s %s VALUES (%s);' % (table_name, 
tuple(keys), questions[:-1]) self.executemany(sql, values, commit=commit) def update(self, table_name, where=None, commit=True, **argv): assert len(argv) > 0, 'NO VALUES TO UPDATE SPECIFIED' if len(argv) > 0: - sql = u'UPDATE %s SET '%table_name + sql = u'UPDATE %s SET ' % table_name arg = [] - for k,v in argv.iteritems(): + for k, v in argv.iteritems(): if type(v) is tuple: sql += u'%s %s ?,' % (k, v[0]) arg.append(v[1]) @@ -720,13 +722,13 @@ def update(self, table_name, where=None, commit=True, **argv): arg.append(v) sql = sql[:-1] if where != None: - sql += u' where %s'%where + sql += u' where %s' % where self.execute_write(sql, arg, commit) def delete(self, table_name, commit=True, **argv): - sql = u'DELETE FROM %s WHERE '%table_name + sql = u'DELETE FROM %s WHERE ' % table_name arg = [] - for k,v in argv.iteritems(): + for k, v in argv.iteritems(): if type(v) is tuple: sql += u'%s %s ? AND ' % (k, v[0]) arg.append(v[1]) @@ -738,7 +740,7 @@ def delete(self, table_name, commit=True, **argv): # -------- Read Operations -------- def size(self, table_name): - num_rec_sql = u"SELECT count(*) FROM %s LIMIT 1"%table_name + num_rec_sql = u"SELECT count(*) FROM %s LIMIT 1" % table_name result = self.fetchone(num_rec_sql) return result @@ -756,7 +758,7 @@ def fetchone(self, sql, args=None): find = find[0] else: return NULL - if len(find)>1: + if len(find) > 1: return find else: return find[0] @@ -767,7 +769,7 @@ def fetchall(self, sql, args=None, retry=0): find = list(res) return find else: - return [] # should it return None? + return [] # should it return None? def getOne(self, table_name, value_name, where=None, conj='and', **kw): """ value_name could be a string, a tuple of strings, or '*' @@ -787,17 +789,17 @@ def getOne(self, table_name, value_name, where=None, conj='and', **kw): else: table_names = table_name - sql = u'select %s from %s'%(value_names, table_names) + sql = u'select %s from %s' % (value_names, table_names) if where or kw: sql += u' where ' if where: sql += where if kw: - sql += u' %s '%conj + sql += u' %s ' % conj if kw: arg = [] - for k,v in kw.iteritems(): + for k, v in kw.iteritems(): if type(v) is tuple: operator = v[0] arg.append(v[1]) @@ -811,7 +813,7 @@ def getOne(self, table_name, value_name, where=None, conj='and', **kw): arg = None # print >> sys.stderr, 'SQL: %s %s' % (sql, arg) - return self.fetchone(sql,arg) + return self.fetchone(sql, arg) def getAll(self, table_name, value_name, where=None, group_by=None, having=None, order_by=None, limit=None, offset=None, conj='and', **kw): """ value_name could be a string, or a tuple of strings @@ -832,17 +834,17 @@ def getAll(self, table_name, value_name, where=None, group_by=None, having=None, else: table_names = table_name - sql = u'select %s from %s'%(value_names, table_names) + sql = u'select %s from %s' % (value_names, table_names) if where or kw: sql += u' where ' if where: sql += where if kw: - sql += u' %s '%conj + sql += u' %s ' % conj if kw: arg = [] - for k,v in kw.iteritems(): + for k, v in kw.iteritems(): if type(v) is tuple: operator = v[0] arg.append(v[1]) @@ -861,11 +863,11 @@ def getAll(self, table_name, value_name, where=None, group_by=None, having=None, if having != None: sql += u' having ' + having if order_by != None: - sql += u' order by ' + order_by # you should add desc after order_by to reversely sort, i.e, 'last_seen desc' as order_by + sql += u' order by ' + order_by # you should add desc after order_by to reversely sort, i.e, 'last_seen desc' as order_by if limit != None: - sql += u' limit 
%d'%limit + sql += u' limit %d' % limit if offset != None: - sql += u' offset %d'%offset + sql += u' offset %d' % offset try: return self.fetchall(sql, arg) or [] @@ -887,7 +889,7 @@ def insertPeer(self, permid, update=True, commit=True, **argv): if peer_id != None: peer_existed = True if update: - where=u'peer_id=%d'%peer_id + where = u'peer_id=%d' % peer_id self.update('Peer', where, commit=commit, **argv) else: self.insert_or_ignore('Peer', permid=bin2str(permid), commit=commit, **argv) @@ -912,7 +914,7 @@ def deletePeer(self, permid=None, peer_id=None, force=True, commit=True): def getPeerID(self, permid): assert isinstance(permid, str), permid # permid must be binary - peer_id = self.permid_id.get(permid,None) + peer_id = self.permid_id.get(permid, None) if peer_id is not None: return peer_id @@ -933,8 +935,8 @@ def getPeerIDS(self, permids): to_select.append(bin2str(permid)) if len(to_select) > 0: - parameters = ", ".join('?'*len(to_select)) - sql_get_peer_ids = "SELECT peer_id, permid FROM Peer WHERE permid IN ("+parameters+")" + parameters = ", ".join('?' * len(to_select)) + sql_get_peer_ids = "SELECT peer_id, permid FROM Peer WHERE permid IN (" + parameters + ")" peerids = self.fetchall(sql_get_peer_ids, to_select) for peer_id, permid in peerids: self.permid_id[str2bin(permid)] = peer_id @@ -987,7 +989,7 @@ def getTorrentID(self, infohash): assert isinstance(infohash, str), "INFOHASH has invalid type: %s" % type(infohash) assert len(infohash) == INFOHASH_LENGTH, "INFOHASH has invalid length: %d" % len(infohash) - tid = self.infohash_id.get(infohash,None) + tid = self.infohash_id.get(infohash, None) if tid is not None: return tid @@ -1009,8 +1011,8 @@ def getTorrentIDS(self, infohashes): while len(to_select) > 0: nrToQuery = min(len(to_select), 50) - parameters = '?,'*nrToQuery - sql_get_torrent_ids = "SELECT torrent_id, infohash FROM Torrent WHERE infohash IN ("+parameters[:-1]+")" + parameters = '?,' * nrToQuery + sql_get_torrent_ids = "SELECT torrent_id, infohash FROM Torrent WHERE infohash IN (" + parameters[:-1] + ")" torrents = self.fetchall(sql_get_torrent_ids, to_select[:nrToQuery]) for torrent_id, infohash in torrents: @@ -1112,7 +1114,7 @@ def updateDB(self, fromver, tover): self.execute_write(sql, commit=False) if fromver < 4: - sql=""" + sql = """ -- V2: Patch for VoteCast DROP TABLE IF EXISTS ModerationCast; @@ -1224,7 +1226,7 @@ def updateDB(self, fromver, tover): """ self.execute_write(sql, commit=False) if fromver < 5: - sql=\ + sql = \ """ -------------------------------------- -- Creating Subtitles (future RichMetadata) DB @@ -1314,7 +1316,7 @@ def updateDB(self, fromver, tover): seen = {} rows = self.fetchall(sql) for row in rows: - if row[0] in seen and row[2] in seen[row[0]]: #duplicate entry + if row[0] in seen and row[2] in seen[row[0]]: # duplicate entry self.execute_write(del_sql, (row[0], row[2])) self.execute_write(ins_sql, (row[0], row[1], row[2], row[3], row[4], row[5], row[6])) else: @@ -1324,7 +1326,7 @@ def updateDB(self, fromver, tover): self.execute_write(sql, commit=False) if fromver < 7: - sql=\ + sql = \ """ -------------------------------------- -- Creating TermFrequency DB @@ -1369,7 +1371,7 @@ def updateDB(self, fromver, tover): self.execute_write(sql, commit=False) if fromver < 8: - sql=\ + sql = \ """ -------------------------------------- -- Creating BundlerPreference DB @@ -1382,7 +1384,7 @@ def updateDB(self, fromver, tover): self.execute_write(sql, commit=False) if fromver < 9: - sql=\ + sql = \ """ CREATE TABLE IF NOT EXISTS _Channels 
( id integer PRIMARY KEY ASC, @@ -1597,11 +1599,19 @@ def updateDB(self, fromver, tover): # InvertedIndex table. if TEST_SQLITECACHEDB_UPGRADE: state_dir = "." + torrent_dir = "." + my_permid = None else: from Tribler.Core.Session import Session session = Session.get_instance() state_dir = session.get_state_dir() - tmpfilename = os.path.join(state_dir,"upgradingdb.txt") + torrent_dir = session.get_torrent_collecting_dir() + + my_permid = session.get_permid() + if my_permid: + my_permid = bin2str(my_permid) + + tmpfilename = os.path.join(state_dir, "upgradingdb.txt") if fromver < 4 or os.path.exists(tmpfilename): def upgradeTorrents(): # fetch some un-inserted torrents to put into the InvertedIndex @@ -1697,7 +1707,7 @@ def upgradeTorrents(): records = self.fetchall(sql) termcount = {} - phrases = [] # torrent_id, term1, term2 + phrases = [] # torrent_id, term1, term2 for torrent_id, name in records: terms = set(extractor.extractTerms(name)) phrase = extractor.extractBiTermPhrase(name) @@ -1717,7 +1727,7 @@ def upgradeTorrents(): if DEBUG: dbg_ts2 = time() - print >>sys.stderr, 'DB Upgradation: extracting and inserting terms took %ss' % (dbg_ts2-dbg_ts1) + print >> sys.stderr, 'DB Upgradation: extracting and inserting terms took %ss' % (dbg_ts2 - dbg_ts1) if fromver < 8: if DEBUG: @@ -1727,7 +1737,7 @@ def upgradeTorrents(): from Tribler.Core.Search.SearchManager import split_into_keywords - #due to a bug, we have to insert all keywords with a length of 2 + # due to a bug, we have to insert all keywords with a length of 2 sql = "SELECT torrent_id, name FROM CollectedTorrent" records = self.fetchall(sql) @@ -1747,7 +1757,7 @@ def upgradeTorrents(): if DEBUG: print >> sys.stderr, "INSERTING NEW KEYWORDS TOOK", time.time() - t1, "INSERTING took", time.time() - t2 - tmpfilename = os.path.join(state_dir,"upgradingdb2.txt") + tmpfilename = os.path.join(state_dir, "upgradingdb2.txt") if fromver < 9 or os.path.exists(tmpfilename): from Tribler.Core.Session import Session from time import time @@ -1767,12 +1777,7 @@ def upgradeTorrents(): except: print >> sys.stderr, "DB Upgradation: failed to create temp-file" - session = Session.get_instance() - my_permid = session.get_permid() - if my_permid: - my_permid = bin2str(my_permid) - - #start converting channelcastdb to new format + # start converting channelcastdb to new format finished_convert = "SELECT name FROM sqlite_master WHERE name='ChannelCast'" select_channels = "SELECT publisher_id, min(time_stamp), max(time_stamp) FROM ChannelCast WHERE publisher_name <> '' GROUP BY publisher_id" select_channel_name = "SELECT publisher_name FROM ChannelCast WHERE publisher_id = ? AND time_stamp = ? 
LIMIT 1" @@ -1791,13 +1796,13 @@ def upgradeTorrents(): if self.fetchone(finished_convert) == 'ChannelCast': - #placeholders for dispersy channel conversion + # placeholders for dispersy channel conversion my_channel_name = None to_be_inserted = [] t1 = time() - #create channels + # create channels permid_peerid = {} channel_permid_cid = {} channels = self.fetchall(select_channels) @@ -1817,9 +1822,9 @@ def upgradeTorrents(): to_be_inserted = [] - #insert torrents + # insert torrents for publisher_id, peer_id in permid_peerid.iteritems(): - torrents = self.fetchall(select_channel_torrent, (publisher_id, )) + torrents = self.fetchall(select_channel_torrent, (publisher_id,)) channel_id = self.fetchone(select_channel_id, (peer_id,)) channel_permid_cid[publisher_id] = channel_id @@ -1827,14 +1832,14 @@ def upgradeTorrents(): for torrent_id, time_stamp in torrents: to_be_inserted.append((-1, torrent_id, channel_id, long(time_stamp), long(time_stamp))) - self.execute_write(update_channel, (len(torrents), channel_id), commit = False) + self.execute_write(update_channel, (len(torrents), channel_id), commit=False) self.executemany(insert_channel_contents, to_be_inserted) - #convert votes + # convert votes to_be_inserted = [] votes = self.fetchall(select_votes) for mod_id, voter_id, vote, time_stamp in votes: - if mod_id != my_permid: #cannot yet convert votes on my channel + if mod_id != my_permid: # cannot yet convert votes on my channel channel_id = channel_permid_cid.get(mod_id, None) @@ -1848,7 +1853,7 @@ def upgradeTorrents(): self.executemany(insert_vote, to_be_inserted) - #set cached nr_spam and nr_favorites + # set cached nr_spam and nr_favorites votes = {} select_pos_vote = "SELECT channel_id, count(*) FROM ChannelVotes WHERE vote == 2 GROUP BY channel_id" select_neg_vote = "SELECT channel_id, count(*) FROM ChannelVotes WHERE vote == -1 GROUP BY channel_id" @@ -1868,15 +1873,15 @@ def upgradeTorrents(): self.executemany(update_votes, channel_tuples) print >> sys.stderr, "Converting took", time() - t1 - self.execute_write('DELETE FROM VoteCast WHERE mod_id <> ?', (my_permid, ), commit = False) - self.execute_write('DELETE FROM ChannelCast WHERE publisher_id <> ?', (my_permid, )) + self.execute_write('DELETE FROM VoteCast WHERE mod_id <> ?', (my_permid,), commit=False) + self.execute_write('DELETE FROM ChannelCast WHERE publisher_id <> ?', (my_permid,)) select_mychannel_id = "SELECT id FROM Channels WHERE peer_id ISNULL LIMIT 1" select_votes_for_me = "SELECT voter_id, vote, time_stamp FROM VoteCast WHERE mod_id = ? Order By time_stamp ASC" select_mychannel_torrent = "SELECT CollectedTorrent.infohash, time_stamp, torrent_file_name FROM ChannelCast, CollectedTorrent WHERE publisher_id = ? AND ChannelCast.infohash = CollectedTorrent.infohash AND CollectedTorrent.torrent_id NOT IN (SELECT torrent_id FROM ChannelTorrents WHERE channel_id = ?) ORDER BY time_stamp DESC LIMIT ?" 
                if my_channel_name:
-                    def dispersy_started(subject,changeType,objectID):
+                    def dispersy_started(subject, changeType, objectID):
                         print >> sys.stderr, "Dispersy started"
                         community = None
@@ -1891,7 +1896,7 @@ def create_my_channel():
                                 dispersy_cid = self.fetchone("SELECT dispersy_cid FROM Channels WHERE id = ?", (channel_id,))
                                 dispersy_cid = str(dispersy_cid)
 
-                                community = dispersy.get_community(dispersy_cid)
+                                community = dispersy.get_community(dispersy_cid)
 
                             else:
                                 print >> sys.stderr, "Dispersy started, creating community"
@@ -1901,11 +1906,11 @@ def create_my_channel():
 
                                 print >> sys.stderr, "Dispersy started, community created"
 
-                            #insert votes
+                            # insert votes
                             insert_votes_for_me()
 
-                            #schedule insert torrents
-                            dispersy.callback.register(insert_my_torrents, delay = 10.0)
+                            # schedule insert torrents
+                            dispersy.callback.register(insert_my_torrents, delay=10.0)
 
                         def insert_votes_for_me():
                             print >> sys.stderr, "Dispersy started, inserting votes"
@@ -1913,7 +1918,7 @@ def insert_votes_for_me():
 
                             to_be_inserted = []
 
-                            votes = self.fetchall(select_votes_for_me, (my_permid, ))
+                            votes = self.fetchall(select_votes_for_me, (my_permid,))
                             for voter_id, vote, time_stamp in votes:
                                 peer_id = self.getPeerID(str2bin(voter_id))
                                 if peer_id:
@@ -1954,34 +1959,34 @@ def insert_my_torrents():
                                         files = torrentdef.get_files_as_unicode_with_length()
                                         to_be_inserted.append((infohash, timestamp, torrentdef.get_name_as_unicode(), tuple(files), torrentdef.get_trackers_as_single_tuple()))
                                     else:
-                                        to_be_removed.append((bin2str(infohash), ))
+                                        to_be_removed.append((bin2str(infohash),))
 
                             if len(torrents) > 0:
-                                if len(to_be_inserted)>0:
-                                    community._disp_create_torrents(to_be_inserted, forward = False)
+                                if len(to_be_inserted) > 0:
+                                    community._disp_create_torrents(to_be_inserted, forward=False)
 
-                                if len(to_be_removed)>0:
+                                if len(to_be_removed) > 0:
                                     self.executemany("DELETE FROM ChannelCast WHERE infohash = ?", to_be_removed)
-                                dispersy.callback.register(insert_my_torrents, delay = 10.0)
+                                dispersy.callback.register(insert_my_torrents, delay=10.0)
 
-                            else: #done
+                            else:  # done
                                 drop_channelcast = "DROP TABLE ChannelCast"
                                 self.execute_write(drop_channelcast)
 
                                 drop_votecast = "DROP TABLE VoteCast"
                                 self.execute_write(drop_votecast)
                         else:
-                            dispersy.callback.register(insert_my_torrents, delay = 10.0)
+                            dispersy.callback.register(insert_my_torrents, delay=10.0)
 
                     from Tribler.community.channel.community import ChannelCommunity
                     from Tribler.dispersy.dispersy import Dispersy
                     from Tribler.Core.TorrentDef import TorrentDef
 
                     global _callback
-                    _callback.register(create_my_channel, delay = 10.0)
+                    _callback.register(create_my_channel, delay=10.0)
                     session.remove_observer(dispersy_started)
 
-                session.add_observer(dispersy_started,NTFY_DISPERSY,[NTFY_STARTED])
+                session.add_observer(dispersy_started, NTFY_DISPERSY, [NTFY_STARTED])
             else:
                 drop_channelcast = "DROP TABLE ChannelCast"
                 self.execute_write(drop_channelcast)
@@ -2001,7 +2006,7 @@ def upgradeTorrents():
                 records = self.fetchall(sql)
 
                 if len(records) == 0:
-                    #self.execute_write("DROP TABLE InvertedIndex")
+                    # self.execute_write("DROP TABLE InvertedIndex")
 
                     # upgradation is complete and hence delete the temp file
                     os.remove(tmpfilename)
@@ -2014,9 +2019,9 @@ def upgradeTorrents():
                     try:
                         torrent_filename = os.path.join(torrent_dir, torrent_filename)
 
-                        #.torrent found, return complete filename
+                        # .torrent found, return complete filename
                         if not os.path.isfile(torrent_filename):
-                            #.torrent not found, possibly a new torrent_collecting_dir
+                            # .torrent not found, possibly a new torrent_collecting_dir
                            torrent_filename = get_collected_torrent_filename(str2bin(infohash))
                            torrent_filename = os.path.join(torrent_dir, torrent_filename)
 
@@ -2025,7 +2030,7 @@
                         torrentdef = TorrentDef.load(torrent_filename)
 
-                        #Making sure that swarmname does not include extension for single file torrents
+                        # Making sure that swarmname does not include extension for single file torrents
                         swarmname = torrentdef.get_name_as_unicode()
                         if not torrentdef.is_multifile_torrent():
                             swarmname, _ = os.path.splitext(swarmname)
@@ -2034,7 +2039,7 @@
                         fileextensions = set()
                         for filename in torrentdef.get_files_as_unicode():
                             filename, extension = os.path.splitext(filename)
-                            for keyword in split_into_keywords(filename, filterStopwords = True):
+                            for keyword in split_into_keywords(filename, filterStopwords=True):
                                 filedict[keyword] = filedict.get(keyword, 0) + 1
 
                             fileextensions.add(extension[1:])
@@ -2043,7 +2048,7 @@
                         if len(filenames) > 1000:
                             def popSort(a, b):
                                 return filedict[a] - filedict[b]
-                            filenames.sort(cmp = popSort, reverse = True)
+                            filenames.sort(cmp=popSort, reverse=True)
                             filenames = filenames[:1000]
 
                     except RuntimeError:
@@ -2055,7 +2060,7 @@ def popSort(a, b):
                         swarmname, extension = os.path.splitext(swarmname)
                         fileextensions.add(extension[1:])
 
-                        filenames.extend(split_into_keywords(swarmname, filterStopwords = True))
+                        filenames.extend(split_into_keywords(swarmname, filterStopwords=True))
 
                     values.append((torrent_id, swarmname, " ".join(filenames), " ".join(fileextensions)))
 
@@ -2097,25 +2102,25 @@ def popSort(a, b):
                 self.execute_write(index)
 
         if fromver < 12:
-            remove_indexes = ["Message_receive_time_idx","Size_calc_age_idx","Number_of_seeders_idx","Number_of_leechers_idx","Torrent_length_idx","Torrent_num_seeders_idx","Torrent_num_leechers_idx"]
+            remove_indexes = ["Message_receive_time_idx", "Size_calc_age_idx", "Number_of_seeders_idx", "Number_of_leechers_idx", "Torrent_length_idx", "Torrent_num_seeders_idx", "Torrent_num_leechers_idx"]
             for index in remove_indexes:
-                self.execute_write("DROP INDEX %s"%index, commit = False)
+                self.execute_write("DROP INDEX %s" % index, commit=False)
 
-            self.execute_write("CREATE INDEX Peer_local_oversion_idx ON Peer(is_local, oversion)", commit = False)
-            self.execute_write("CREATE INDEX torrent_tracker_last_idx ON TorrentTracker (tracker, last_check)", commit = False)
+            self.execute_write("CREATE INDEX Peer_local_oversion_idx ON Peer(is_local, oversion)", commit=False)
+            self.execute_write("CREATE INDEX torrent_tracker_last_idx ON TorrentTracker (tracker, last_check)", commit=False)
             self.execute_write("CREATE INDEX IF NOT EXISTS ChannelTorChanIndex ON _ChannelTorrents(torrent_id, channel_id)")
             self.clean_db(True)
 
         if fromver < 13:
-            self.execute_write("INSERT INTO MetaDataTypes ('name') VALUES ('swift-url');", commit = False)
+            self.execute_write("INSERT INTO MetaDataTypes ('name') VALUES ('swift-url');", commit=False)
 
-        tmpfilename = os.path.join(state_dir,"upgradingdb3.txt")
+        tmpfilename = os.path.join(state_dir, "upgradingdb3.txt")
         if fromver < 14 or os.path.exists(tmpfilename):
             if fromver < 14:
-                self.execute_write("ALTER TABLE Torrent ADD COLUMN dispersy_id integer;", commit = False)
-                self.execute_write("ALTER TABLE Torrent ADD COLUMN swift_hash text;", commit = False)
-                self.execute_write("ALTER TABLE Torrent ADD COLUMN swift_torrent_hash text;", commit = False)
-                self.execute_write("CREATE INDEX Torrent_insert_idx ON Torrent (insert_time, swift_torrent_hash);", commit = False)
+                self.execute_write("ALTER TABLE Torrent ADD COLUMN dispersy_id integer;", commit=False)
+                self.execute_write("ALTER TABLE Torrent ADD COLUMN swift_hash text;", commit=False)
+                self.execute_write("ALTER TABLE Torrent ADD COLUMN swift_torrent_hash text;", commit=False)
+                self.execute_write("CREATE INDEX Torrent_insert_idx ON Torrent (insert_time, swift_torrent_hash);", commit=False)
                 self.execute_write("CREATE INDEX Torrent_info_roothash_idx ON Torrent (infohash, swift_torrent_hash);")
 
             # Create an empty file to mark the process of upgradation.
@@ -2132,7 +2137,6 @@
                 print >> sys.stderr, "DB Upgradation: temp-file successfully created"
             except:
                 print >> sys.stderr, "DB Upgradation: failed to create temp-file"
-            session = Session.get_instance()
 
             def upgradeTorrents():
                 print >> sys.stderr, "Upgrading DB .. hashing torrents"
@@ -2151,19 +2155,18 @@ def upgradeTorrents():
                         print >> sys.stderr, "DB Upgradation: temp-file deleted", tmpfilename
                     return
 
-                torrent_dir = session.get_torrent_collecting_dir()
                 for infohash, torrent_filename in records:
                     if not os.path.isfile(torrent_filename):
                         torrent_filename = os.path.join(torrent_dir, torrent_filename)
 
-                        #.torrent found, return complete filename
+                        # .torrent found, return complete filename
                         if not os.path.isfile(torrent_filename):
-                            #.torrent not found, use default collected_torrent_filename
+                            # .torrent not found, use default collected_torrent_filename
                             torrent_filename = get_collected_torrent_filename(str2bin(infohash))
                             torrent_filename = os.path.join(torrent_dir, torrent_filename)
 
                     if not os.path.isfile(torrent_filename):
-                        not_found.append((infohash, ))
+                        not_found.append((infohash,))
                     else:
                         sdef, swiftpath = rth._write_to_collected(torrent_filename)
                         found.append((bin2str(sdef.get_roothash()), swiftpath, infohash))
@@ -2219,7 +2222,7 @@ def upgradeTorrents():
 
             self.execute_write("DROP TABLE IF EXISTS SubtitlesHave")
             self.execute_write("DROP INDEX IF EXISTS subtitles_have_idx")
-            self.execute_write("DROP INDEX IF EXISTS subtitles_have_ts", commit = True)
+            self.execute_write("DROP INDEX IF EXISTS subtitles_have_ts", commit=True)
 
             update = list(self.execute_read("SELECT peer_id, torrent_id, term_id, term_order FROM ClicklogSearch"))
             results = self.execute_read("SELECT ClicklogTerm.term_id, TermFrequency.term_id FROM TermFrequency, ClicklogTerm WHERE TermFrequency.term == ClicklogTerm.term")
@@ -2236,7 +2239,7 @@ def upgradeTorrents():
             self.execute_write("DROP INDEX IF EXISTS idx_terms_term")
 
             self.execute_write("DELETE FROM Peer WHERE superpeer = 1")
-            self.execute_write("DROP VIEW IF EXISTS SuperPeer", commit = True)
+            self.execute_write("DROP VIEW IF EXISTS SuperPeer", commit=True)
 
             self.execute_write("DROP INDEX IF EXISTS Peer_name_idx")
             self.execute_write("DROP INDEX IF EXISTS Peer_ip_idx")
@@ -2257,12 +2260,12 @@ def upgradeTorrents():
 
             self.execute_write("INSERT INTO MetaDataTypes ('name') VALUES ('video-info')")
 
-    def clean_db(self, vacuum = False):
+    def clean_db(self, vacuum=False):
         from time import time
 
-        self.execute_write("DELETE FROM TorrentBiTermPhrase WHERE torrent_id NOT IN (SELECT torrent_id FROM CollectedTorrent)", commit = False)
-        self.execute_write("DELETE FROM ClicklogSearch WHERE peer_id <> 0", commit = False)
+        self.execute_write("DELETE FROM TorrentBiTermPhrase WHERE torrent_id NOT IN (SELECT torrent_id FROM CollectedTorrent)", commit=False)
+        self.execute_write("DELETE FROM ClicklogSearch WHERE peer_id <> 0", commit=False)
+ self.execute_write("DELETE FROM TorrentFiles where torrent_id in (select torrent_id from CollectedTorrent)", commit=False) self.execute_write("DELETE FROM Torrent where name is NULL and torrent_id not in (select torrent_id from _ChannelTorrents)") if vacuum: @@ -2274,7 +2277,7 @@ def clean_db(self, vacuum = False): _callback = None _callback_lock = RLock() -def try_register(db, callback = None): +def try_register(db, callback=None): global _callback, _callback_lock if not _callback: @@ -2287,11 +2290,11 @@ def try_register(db, callback = None): _callback = callback if db: - if currentThread().getName()== 'Dispersy': + if currentThread().getName() == 'Dispersy': db.initialBegin() else: - #Niels: 15/05/2012: initalBegin HAS to be on the dispersy thread, as transactions are not shared across threads. - _callback.register(db.initialBegin, priority = 1024) + # Niels: 15/05/2012: initalBegin HAS to be on the dispersy thread, as transactions are not shared across threads. + _callback.register(db.initialBegin, priority=1024) finally: _callback_lock.release() @@ -2304,7 +2307,7 @@ def register_task(db, *args, **kwargs): if not _callback: try_register(db) if not _callback or not _callback.is_running: - def fakeDispersy(call, args=(), kwargs = {}): + def fakeDispersy(call, args=(), kwargs={}): call(*args, **kwargs) return fakeDispersy(*args) return _callback.register(*args, **kwargs) @@ -2315,24 +2318,24 @@ def call_task(db, *args, **kwargs): try_register(db) if not _callback or not _callback.is_running: - def fakeDispersy(call, args=(), kwargs = {}): + def fakeDispersy(call, args=(), kwargs={}): return call(*args, **kwargs) return fakeDispersy(*args) return _callback.call(*args, **kwargs) def onDBThread(): - return currentThread().getName()== 'Dispersy' + return currentThread().getName() == 'Dispersy' def forceDBThread(func): - def invoke_func(*args,**kwargs): + def invoke_func(*args, **kwargs): if not onDBThread(): if TRHEADING_DEBUG: stack = inspect.stack() callerstr = "" for i in range(1, min(4, len(stack))): caller = stack[i] - callerstr += "%s %s:%s "%(caller[3],caller[1],caller[2]) - print >> sys.stderr, long(time()), "SWITCHING TO DBTHREAD %s %s:%s called by %s"%(func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr) + callerstr += "%s %s:%s " % (caller[3], caller[1], caller[2]) + print >> sys.stderr, long(time()), "SWITCHING TO DBTHREAD %s %s:%s called by %s" % (func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr) register_task(None, func, args, kwargs) else: @@ -2342,17 +2345,17 @@ def invoke_func(*args,**kwargs): return invoke_func def forcePrioDBThread(func): - def invoke_func(*args,**kwargs): + def invoke_func(*args, **kwargs): if not onDBThread(): if TRHEADING_DEBUG: stack = inspect.stack() callerstr = "" for i in range(1, min(4, len(stack))): caller = stack[i] - callerstr += "%s %s:%s "%(caller[3],caller[1],caller[2]) - print >> sys.stderr, long(time()), "SWITCHING TO DBTHREAD %s %s:%s called by %s"%(func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr) + callerstr += "%s %s:%s " % (caller[3], caller[1], caller[2]) + print >> sys.stderr, long(time()), "SWITCHING TO DBTHREAD %s %s:%s called by %s" % (func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr) - register_task(None, func, args, kwargs, priority = 99) + register_task(None, func, args, kwargs, priority=99) else: func(*args, **kwargs) @@ -2360,7 +2363,7 @@ def invoke_func(*args,**kwargs): return invoke_func 
 def forceAndReturnDBThread(func):
-    def invoke_func(*args,**kwargs):
+    def invoke_func(*args, **kwargs):
         global _callback
 
         if not onDBThread():
@@ -2369,10 +2372,10 @@ def invoke_func(*args,**kwargs):
                 callerstr = ""
                 for i in range(1, min(4, len(stack))):
                     caller = stack[i]
-                    callerstr += "%s %s:%s"%(caller[3],caller[1],caller[2])
-                print >> sys.stderr, long(time()), "SWITCHING TO DBTHREAD %s %s:%s called by %s"%(func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr)
+                    callerstr += "%s %s:%s" % (caller[3], caller[1], caller[2])
+                print >> sys.stderr, long(time()), "SWITCHING TO DBTHREAD %s %s:%s called by %s" % (func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno, callerstr)
 
-            return call_task(None, func, args, kwargs, timeout = 15.0, priority = 99)
+            return call_task(None, func, args, kwargs, timeout=15.0, priority=99)
         else:
             return func(*args, **kwargs)
 
@@ -2393,7 +2396,7 @@ def getInstance(cls, *args, **kw):
         try:
             if cls.__single is None:
                 cls.__single = cls(*args, **kw)
-                #print >>sys.stderr,"SqliteCacheDB: getInstance: created is",cls,cls.__single
+                # print >>sys.stderr,"SqliteCacheDB: getInstance: created is",cls,cls.__single
         finally:
             cls.lock.release()
         return cls.__single
@@ -2420,7 +2423,7 @@ def initialBegin(self):
         try:
             print >> sys.stderr, "SQLiteNoCacheDB.initialBegin: BEGIN"
             self._execute("BEGIN;")
-            
+
         except:
             print >> sys.stderr, "INITIAL BEGIN FAILED"
             raise
@@ -2428,7 +2431,7 @@
         _shouldCommit = True
 
     @forceDBThread
-    def commitNow(self, vacuum = False, exiting = False):
+    def commitNow(self, vacuum=False, exiting=False):
         global _shouldCommit, _cacheCommit
         if _cacheCommit and _shouldCommit and onDBThread():
             try:
@@ -2454,7 +2457,7 @@ def commitNow(self, vacuum = False, exiting = False):
                 else:
                     print >> sys.stderr, "SQLiteNoCacheDB.commitNow: not calling BEGIN exiting"
 
-                    #print_stack()
+                    # print_stack()
 
         elif vacuum:
             self._execute("VACUUM;")
@@ -2489,7 +2492,7 @@ def commit(self):
         if DEPRECATION_DEBUG:
             raise DeprecationWarning('Please do not use commit')
 
-    def clean_db(self, vacuum = False, exiting = False):
+    def clean_db(self, vacuum=False, exiting=False):
         SQLiteCacheDBV5.clean_db(self, False)
 
         if vacuum:
@@ -2516,11 +2519,11 @@ def _execute(self, sql, args=None):
 
                 if args is None:
                     f.write('QueryDebug: (%f) %s\n' % (time(), sql))
-                    for row in cur.execute('EXPLAIN QUERY PLAN '+sql).fetchall():
+                    for row in cur.execute('EXPLAIN QUERY PLAN ' + sql).fetchall():
                         f.write('%s %s %s\t%s\n' % row)
                 else:
                     f.write('QueryDebug: (%f) %s %s\n' % (time(), sql, str(args)))
-                    for row in cur.execute('EXPLAIN QUERY PLAN '+sql, args).fetchall():
+                    for row in cur.execute('EXPLAIN QUERY PLAN ' + sql, args).fetchall():
                         f.write('%s %s %s\t%s\n' % row[:4])
 
         try:
@@ -2557,11 +2560,11 @@ def _executemany(self, sql, args=None):
 
                 if args is None:
                     f.write('QueryDebug-executemany: (%f) %s\n' % (time(), sql))
-                    for row in cur.executemany('EXPLAIN QUERY PLAN '+sql).fetchall():
+                    for row in cur.executemany('EXPLAIN QUERY PLAN ' + sql).fetchall():
                         f.write('%s %s %s\t%s\n' % row)
                 else:
                     f.write('QueryDebug-executemany: (%f) %s %d times\n' % (time(), sql, len(args)))
-                    for row in cur.executemany('EXPLAIN QUERY PLAN '+sql, args).fetchall():
+                    for row in cur.executemany('EXPLAIN QUERY PLAN ' + sql, args).fetchall():
                         f.write('%s %s %s\t%s\n' % row)
 
         try:
@@ -2588,7 +2591,7 @@ def _executemany(self, sql, args=None):
 
 # Arno, 2012-08-02: If this becomes multithreaded again, reinstate safe_dict() in caches
 class SQLiteCacheDB(SQLiteNoCacheDB):
-    __single = None # used for multithreaded singletons pattern
+    __single = None  # used for multithreaded singletons pattern
 
     @classmethod
     def getInstance(cls, *args, **kw):
@@ -2598,7 +2601,7 @@ def getInstance(cls, *args, **kw):
         try:
             if cls.__single is None:
                 cls.__single = cls(*args, **kw)
-                #print >>sys.stderr,"SqliteCacheDB: getInstance: created is",cls,cls.__single
+                # print >>sys.stderr,"SqliteCacheDB: getInstance: created is",cls,cls.__single
         finally:
             cls.lock.release()
         return cls.__single
diff --git a/Tribler/Core/Tag/Extraction.py b/Tribler/Core/Tag/Extraction.py
index ec92a74a8b4..d25ecbc0cf5 100644
--- a/Tribler/Core/Tag/Extraction.py
+++ b/Tribler/Core/Tag/Extraction.py
@@ -14,11 +14,11 @@
 class TermExtraction:
     __single = None
     lock = threading.Lock()
-    
+
     def getInstance(*args, **kw):
         # Singleton pattern with double-checking
         if TermExtraction.__single is None:
-            TermExtraction.lock.acquire()   
+            TermExtraction.lock.acquire()
             try:
                 if TermExtraction.__single is None:
                     TermExtraction(*args, **kw)
@@ -26,23 +26,20 @@ def getInstance(*args, **kw):
                 TermExtraction.lock.release()
         return TermExtraction.__single
     getInstance = staticmethod(getInstance)
-    
-    def __init__(self):
+
+    def __init__(self, install_dir='.'):
         if TermExtraction.__single is not None:
             raise RuntimeError, "TermExtraction is singleton"
         TermExtraction.__single = self
-        
-        from Tribler.Core.Session import Session
-        self.session = Session.get_instance()
-        
-        filterfn = os.path.join(self.session.get_install_dir(),LIBRARYNAME,'Core','Tag','stop_snowball.filter')
+
+        filterfn = os.path.join(install_dir, LIBRARYNAME, 'Core', 'Tag', 'stop_snowball.filter')
         self.stopwords_filter = StopwordsFilter(stopwordsfilename=filterfn)
-        
-        self.containsdigits_filter = re.compile(r'\d',re.UNICODE)
-        self.alldigits_filter = re.compile(r'^\d*$',re.UNICODE)
-        self.isepisode_filter = re.compile(r'^s\d{2}e\d{2}',re.UNICODE)
-        
-        self.domain_terms = set('www net com org'.split())
+
+        self.containsdigits_filter = re.compile(r'\d', re.UNICODE)
+        self.alldigits_filter = re.compile(r'^\d*$', re.UNICODE)
+        self.isepisode_filter = re.compile(r'^s\d{2}e\d{2}', re.UNICODE)
+
+        self.domain_terms = set('www net com org'.split())
 
     def extractTerms(self, name_or_keywords):
         """
@@ -57,7 +54,7 @@ def extractTerms(self, name_or_keywords):
             keywords = split_into_keywords(name_or_keywords)
         else:
             keywords = name_or_keywords
-        
+
         return [term for term in keywords if self.isSuitableTerm(term)]
 
     def extractBiTermPhrase(self, name_or_keywords):
diff --git a/Tribler/Main/tribler.py b/Tribler/Main/tribler.py
index 1e62e876cf9..61c4297906e 100644
--- a/Tribler/Main/tribler.py
+++ b/Tribler/Main/tribler.py
@@ -13,6 +13,7 @@
 #########################################################################
 
 import logging.config
+from Tribler.Core.Tag.Extraction import TermExtraction
 logging.config.fileConfig("logger.conf")
 
 # Arno: M2Crypto overrides the method for https:// in the
@@ -189,6 +190,8 @@ def __init__(self, params, single_instance_checker, installdir):
             cat = Category.getInstance(self.utility.getPath())
             cat.init_from_main(self.utility)
 
+            TermExtraction.getInstance(self.utility.getPath())
+
             # Put it here so an error is shown in the startup-error popup
             # Start server for instance2instance communication
             self.i2iconnhandler = InstanceConnectionHandler(self.i2ithread_readlinecallback)
diff --git a/Tribler/Test/bak_tribler_sdb.py b/Tribler/Test/bak_tribler_sdb.py
index fe6c3cda72a..120610a2025 100644
--- a/Tribler/Test/bak_tribler_sdb.py
+++ b/Tribler/Test/bak_tribler_sdb.py
@@ -5,7 +5,8 @@
 DB_FILE_NAME = 'tribler.sdb'
 DB_DIR_NAME = None
-FILES_DIR = os.path.abspath(os.path.join('extend_db_dir'))
+
+FILES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'extend_db_dir'))
 TRIBLER_DB_PATH = os.path.join(FILES_DIR, 'tribler.sdb')
 TRIBLER_DB_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_tribler.sdb')
 STATE_FILE_NAME_PATH = os.path.join(FILES_DIR, 'tribler.sdb-journal')
@@ -19,22 +20,20 @@ def init_bak_tribler_sdb():
 
         if os.path.isfile(TRIBLER_DB_PATH_BACKUP):
             copyFile(TRIBLER_DB_PATH_BACKUP, TRIBLER_DB_PATH)
-            #print "refresh sqlite db", TRIBLER_DB_PATH
+            # print "refresh sqlite db", TRIBLER_DB_PATH
 
     if os.path.exists(STATE_FILE_NAME_PATH):
         os.remove(STATE_FILE_NAME_PATH)
         print "remove journal file"
-        
-        
+
 def extract_db_files(file_dir, file_name):
     try:
         import tarfile
-        tar=tarfile.open(os.path.join(file_dir, file_name), 'r|gz')
+        tar = tarfile.open(os.path.join(file_dir, file_name), 'r|gz')
         for member in tar:
             print "extract file", member
             tar.extract(member)
-            dest = os.path.join(file_dir,member.name)
+            dest = os.path.join(file_dir, member.name)
             dest_dir = os.path.dirname(dest)
             if not os.path.isdir(dest_dir):
                 os.makedirs(dest_dir)
diff --git a/Tribler/Test/test_sqlitecachedb.py b/Tribler/Test/test_sqlitecachedb.py
index bf882bcfe75..4187d702588 100644
--- a/Tribler/Test/test_sqlitecachedb.py
+++ b/Tribler/Test/test_sqlitecachedb.py
@@ -4,19 +4,19 @@
 from traceback import print_exc
 import thread
 from threading import Thread
-from time import time,sleep
+from time import time, sleep
 import math
 from random import shuffle
 
 import apsw
-from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, DEFAULT_BUSY_TIMEOUT,CURRENT_MAIN_DB_VERSION
+from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, DEFAULT_BUSY_TIMEOUT, CURRENT_MAIN_DB_VERSION
 from bak_tribler_sdb import *
 
-CREATE_SQL_FILE = os.path.join('..',"schema_sdb_v"+str(CURRENT_MAIN_DB_VERSION)+".sql")
+CREATE_SQL_FILE = os.path.join('..', "schema_sdb_v" + str(CURRENT_MAIN_DB_VERSION) + ".sql")
 
 import Tribler.Core.CacheDB.sqlitecachedb
-print >>sys.stderr,"TEST: ENABLE DBUPGRADE HACK"
+print >> sys.stderr, "TEST: ENABLE DBUPGRADE HACK"
 Tribler.Core.CacheDB.sqlitecachedb.TEST_SQLITECACHEDB_UPGRADE = True
 
 def init():
@@ -38,8 +38,8 @@ def openDB(self, *args, **argv):
 
     def initDB(self, *args, **argv):
         self.db.initDB(*args, **argv)
-        #self.remove_t_index()
-        #self.remove_p_index()
+        # self.remove_t_index()
+        # self.remove_p_index()
 
     def remove_t_index(self):
         indices = [
@@ -48,7 +48,7 @@
         'Torrent_relevance_idx',
         'Torrent_num_seeders_idx',
         'Torrent_num_leechers_idx',
-        #'Torrent_name_idx',
+        # 'Torrent_name_idx',
         ]
         for index in indices:
             sql = 'drop index ' + index
@@ -84,52 +84,52 @@ def test(self):
 
     def testBrowseItems(self, table_name, limit, order=None, where='', num_pages=50, shuffle_page=True):
         start = time()
         nrec = self.db.size(table_name)
-        pages = int(math.ceil(1.0*nrec/limit))
+        pages = int(math.ceil(1.0 * nrec / limit))
 
         offsets = []
         for i in range(pages):
-            offset = i*limit
+            offset = i * limit
             offsets.append(offset)
         if shuffle_page:
             shuffle(offsets)
 
-        sql = "SELECT * FROM %s"%table_name
+        sql = "SELECT * FROM %s" % table_name
         if where:
-            sql += " WHERE %s"%where
+            sql += " WHERE %s" % where
         if order:
-            sql += " ORDER BY %s"%order
+            sql += " ORDER BY %s" % order
         if limit:
-            sql += " LIMIT %s"%limit
+            sql += " LIMIT %s" % limit
         sql += " OFFSET ?"
nrec = 0 npage = 0 - print 'browse %7s by %14s:'%(table_name, order), + print 'browse %7s by %14s:' % (table_name, order), if where: print where, sys.stdout.flush() start2 = time() long_time = 0 - for offset in offsets[-1*num_pages:]: + for offset in offsets[-1 * num_pages:]: res = self.db.fetchall(sql, (offset,)) nrec += len(res) npage += 1 now = time() past = now - start2 start2 = now - if past>1: + if past > 1: print >> sys.stderr, npage, past sys.stderr.flush() long_time += 1 - if long_time>=10: # at most 10 times long waiting + if long_time >= 10: # at most 10 times long waiting break if npage == 0: return 1 - total_time = time()-start - page_time = total_time/npage + total_time = time() - start + page_time = total_time / npage if page_time > 0: - pages_sec = 1/page_time + pages_sec = 1 / page_time else: pages_sec = 0 - print '%5.4f %6.1f %4d %2d %5.3f'%(page_time, pages_sec, nrec, npage, total_time) + print '%5.4f %6.1f %4d %2d %5.3f' % (page_time, pages_sec, nrec, npage, total_time) sys.stdout.flush() return page_time @@ -137,28 +137,28 @@ def banchTestBrowse(self, table_name, nitems, sort_keys): nrecs = self.db.size(table_name) page_times = [] for key in sort_keys: - page_time=self.testBrowseItems(table_name, nitems, key) + page_time = self.testBrowseItems(table_name, nitems, key) page_times.append(page_time) table_row = page_times[:] - table_row.insert(0, nrecs) # insert second - table_row.insert(0, 'test') # insert first - avg_sorted_page_time = sum(page_times[1:])/len(page_times[1:]) - table_row.insert(len(sort_keys)*2, avg_sorted_page_time) # insert last - table_row.insert(len(sort_keys)*2, 1.0/avg_sorted_page_time) # insert last + table_row.insert(0, nrecs) # insert second + table_row.insert(0, 'test') # insert first + avg_sorted_page_time = sum(page_times[1:]) / len(page_times[1:]) + table_row.insert(len(sort_keys) * 2, avg_sorted_page_time) # insert last + table_row.insert(len(sort_keys) * 2, 1.0 / avg_sorted_page_time) # insert last return table_row def printTableRow(self, table_row): - print '|| %5s'%table_row[0], - print '||%6d'%table_row[1], + print '|| %5s' % table_row[0], + print '||%6d' % table_row[1], for i in range(len(table_row[2:-1])): - print '|| %5.4f'%table_row[i+2], - print '|| %5.1f ||'%table_row[-1] + print '|| %5.4f' % table_row[i + 2], + print '|| %5.1f ||' % table_row[-1] def testBrowse(self): - #print "page_time, pages_sec, nrec, num_pages, total_time" + # print "page_time, pages_sec, nrec, num_pages, total_time" nitems = 20 table_name = 'CollectedTorrent' - torrent_sort_keys = [None, 'length','creation_date', 'num_seeders', 'num_leechers', 'relevance', 'source_id', 'name'] + torrent_sort_keys = [None, 'length', 'creation_date', 'num_seeders', 'num_leechers', 'relevance', 'source_id', 'name'] torrent_table_row = self.banchTestBrowse(table_name, nitems, torrent_sort_keys) print table_name = 'Peer' @@ -167,7 +167,7 @@ def testBrowse(self): print type = 'test' - if type=='test': + if type == 'test': print '|| DB Type || #Torrents', for key in torrent_sort_keys: print '||', key, @@ -175,7 +175,7 @@ def testBrowse(self): self.printTableRow(torrent_table_row) - if type=='test': + if type == 'test': print '|| DB Type || #Peers', for key in peer_sort_keys: print '||', key, @@ -188,19 +188,19 @@ def testBrowseCategory(self): nitems = 20 table_name = 'CollectedTorrent' key = 'num_seeders' - categories = range(1,9) + categories = range(1, 9) nrecs = self.db.size(table_name) page_times = [] for category in categories: - where = 'category_id=%d'%category - 
page_time=self.testBrowseItems(table_name, nitems, key, where) + where = 'category_id=%d' % category + page_time = self.testBrowseItems(table_name, nitems, key, where) page_times.append(page_time) table_row = page_times[:] - table_row.insert(0, nrecs) # insert second - table_row.insert(0, 'test') # insert first - avg_sorted_page_time = sum(page_times[1:])/len(page_times[1:]) - table_row.insert(len(categories)*2, avg_sorted_page_time) # insert last - table_row.insert(len(categories)*2, 1.0/avg_sorted_page_time) # insert last + table_row.insert(0, nrecs) # insert second + table_row.insert(0, 'test') # insert first + avg_sorted_page_time = sum(page_times[1:]) / len(page_times[1:]) + table_row.insert(len(categories) * 2, avg_sorted_page_time) # insert last + table_row.insert(len(categories) * 2, 1.0 / avg_sorted_page_time) # insert last cat_name = {1: 'Video', 2: 'VideoClips', @@ -246,7 +246,7 @@ def testGetSimilarTorrents(self, num, num_sim=10): torrent_id = torrent_id[0] skip_begin = time() pop_torrent = self.getNumOwners(torrent_id) - skip_time += time()-skip_begin + skip_time += time() - skip_begin if pop_torrent < 2: continue sql = """ @@ -261,40 +261,40 @@ def testGetSimilarTorrents(self, num, num_sim=10): real_num2 += 1 # - #print len(sim_torrents) + # print len(sim_torrents) if len(sim_torrents) > num: for sim_torrent_id, com in sim_torrents: - if com < 1 or sim_torrent_id==torrent_id: + if com < 1 or sim_torrent_id == torrent_id: continue pop_sim_torrent = self.getNumOwners(sim_torrent_id) - sim = com/(pop_sim_torrent*pop_torrent)**0.5 - sim_res.append((sim,sim_torrent_id)) + sim = com / (pop_sim_torrent * pop_torrent) ** 0.5 + sim_res.append((sim, sim_torrent_id)) sim_res.sort() sim_res.reverse() - sim_torrents_id = tuple([int(ti) for (sim,ti) in sim_res[:num_sim]]) + sim_torrents_id = tuple([int(ti) for (sim, ti) in sim_res[:num_sim]]) else: - sim_torrents_id = tuple([int(ti) for (ti,co) in sim_torrents]) + sim_torrents_id = tuple([int(ti) for (ti, co) in sim_torrents]) if len(sim_torrents_id) > 0: if len(sim_torrents_id) == 1: - sim_torrents = '(' + str(sim_torrents_id[0]) +')' + sim_torrents = '(' + str(sim_torrents_id[0]) + ')' else: sim_torrents = repr(sim_torrents_id) sql = "select name,torrent_id from CollectedTorrent where torrent_id in " + \ sim_torrents + " order by name" sim_names = self.db.fetchall(sql) - #for name,ti in sim_names: + # for name,ti in sim_names: # print name, ti - #print res - past = time()-start - if real_num>0: - if real_num2>0: - print "Time for sim torrent %.4f %.4f"%(past/real_num, (past-skip_time)/real_num2), past, real_num, real_num2 + # print res + past = time() - start + if real_num > 0: + if real_num2 > 0: + print "Time for sim torrent %.4f %.4f" % (past / real_num, (past - skip_time) / real_num2), past, real_num, real_num2 else: - print "Time for sim torrent %.4f"%(past/real_num), '-', past, real_num, real_num2 - return past/num + print "Time for sim torrent %.4f" % (past / real_num), '-', past, real_num, real_num2 + return past / num return 1 # TODO: @@ -317,9 +317,9 @@ def testGetPeerHistory(self, num): """ res = self.db.fetchall(sql, (peer_id,)) real_num += 1 - past = time()-start - if real_num>0: - print "Time for peer history %.4f"%(past/real_num), past, real_num + past = time() - start + if real_num > 0: + print "Time for peer history %.4f" % (past / real_num), past, real_num class TestSQLitePerformance(unittest.TestCase): @@ -385,14 +385,18 @@ def setUp(self): self.db_path = 'tmp.db' if os.path.exists(self.db_path): 
os.remove(self.db_path) + self.db_name = os.path.split(self.db_path)[1] + def tearDown(self): - db = SQLiteCacheDB.getInstance() - db.close(clean=True) + SQLiteCacheDB.getInstance().close(clean=True) + SQLiteCacheDB.delInstance() + if os.path.exists(self.db_path): os.remove(self.db_path) + def test_open_close_db(self): sqlite_test = SQLiteCacheDB.getInstance() sqlite_test.openDB(self.db_path, 0) @@ -414,7 +418,7 @@ def basic_funcs(self): db.createDBTable(create_sql, self.db_path) db.insert('person', lastname='a', firstname='b') one = db.fetchone('select * from person') - assert one == ('a','b') + assert one == ('a', 'b') one = db.fetchone("select lastname from person where firstname == 'b'") assert one == 'a' @@ -424,7 +428,7 @@ values = [] for i in range(100): - value = (str(i), str(i**2)) + value = (str(i), str(i ** 2)) values.append(value) db.insertMany('person', values) all = db.fetchall('select * from person') @@ -486,13 +490,13 @@ def test_insertPeer(self): db = SQLiteCacheDB.getInstance() db.createDBTable(create_sql, self.db_path) assert db.size('Peer') == 0 - fake_permid_x = 'fake_permid_x'+'0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' + fake_permid_x = 'fake_permid_x' + '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x'} permid = peer_x.pop('permid') db.insertPeer(permid, update=False, **peer_x) assert db.size('Peer') == 1 assert db.getOne('Peer', 'name', peer_id=1) == peer_x['name'] - peer_x['port']=456 + peer_x['port'] = 456 db.insertPeer(permid, update=False, **peer_x) assert db.getOne('Peer', 'port', peer_id=1) == 234 db.insertPeer(permid, update=True, **peer_x) @@ -507,9 +511,9 @@ def setUp(self): SQLiteCacheDB.DEBUG = False def tearDown(self): - db = SQLiteCacheDB.getInstance() - db.close(clean=True) - del db + SQLiteCacheDB.getInstance().close(clean=True) + SQLiteCacheDB.delInstance() + if os.path.exists(self.db_path): os.remove(self.db_path) @@ -520,21 +524,21 @@ def create_db(self, db_path, busytimeout=DEFAULT_BUSY_TIMEOUT): f = open(tmp_sql_path, 'w') f.write(create_sql) f.close() - #print "initDB", db_path + # print "initDB", db_path db.initDB(db_path, tmp_sql_path, busytimeout=busytimeout, check_version=False) os.remove(tmp_sql_path) def write_data(self): db = SQLiteCacheDB.getInstance() - #db.begin() + # db.begin() db.insert('person', lastname='a', firstname='b') values = [] for i in range(100): - value = (str(i), str(i**2)) + value = (str(i), str(i ** 2)) values.append(value) db.insertMany('person', values) db.commit() - #db.begin() + # db.begin() db.commit() db.commit() db.close() @@ -542,7 +546,7 @@ def read_data(self): db = SQLiteCacheDB.getInstance() one = db.fetchone('select * from person') - assert one == ('a','b'), str(one) + assert one == ('a', 'b'), str(one) one = db.fetchone("select lastname from person where firstname == 'b'") assert one == 'a' @@ -601,7 +605,7 @@ def test_basic_funcs_lib0(self): def test_new_thread_basic_funcs(self): # test create/write/read db by 3 different threads # 3 separate connections should be created, one per thread - #print >> sys.stderr, '------>>>>> test_new_thread_basic_funcs', threading.currentThread().getName() + # print >> sys.stderr, '------>>>>> test_new_thread_basic_funcs', threading.currentThread().getName() self.create_db(self.db_path) thread.start_new_thread(self.write_data, ()) sleep(2) @@ -613,9 +617,9 @@ class Reader(Thread): def
__init__(self, period): self.period = period Thread.__init__(self) - self.setName('Reader.'+self.getName()) + self.setName('Reader.' + self.getName()) self.read_locks = 0 - self.num = ' R%3s '%self.getName().split('-')[-1] + self.num = ' R%3s ' % self.getName().split('-')[-1] def keep_reading_data(self, period): db = SQLiteCacheDB.getInstance() @@ -627,7 +631,7 @@ def keep_reading_data(self, period): print "begin read", self.getName(), period, time() while True: et = time() - if et-st > period: + if et - st > period: break if DEBUG_R: print "...start read", self.getName(), time() @@ -635,7 +639,7 @@ try: self.all = db.fetchall("select * from person") - self.last_read = time()-st + self.last_read = time() - st self.read_times += 1 except Exception, msg: print_exc() @@ -650,18 +654,18 @@ sys.stdout.flush() # num = len(all) - #print "----------- read", self.getName(), num + # print "----------- read", self.getName(), num # if DEBUG_R: # if num>oldnum: # print self.getName(), "read", num-oldnum # sys.stdout.flush() db.close() if DEBUG_R: - print "done read", self.getName(), len(self.all), time()-st + print "done read", self.getName(), len(self.all), time() - st sys.stdout.flush() - #assert self.read_locks == 0, self.read_locks + # assert self.read_locks == 0, self.read_locks def run(self): self.keep_reading_data(self.period) @@ -670,12 +674,12 @@ class Writer(Thread): def __init__(self, period, num_write, commit): self.period = period Thread.__init__(self) - self.setName('Writer.'+self.getName()) + self.setName('Writer.' + self.getName()) self.write_locks = 0 self.writes = 0 self.commit = commit self.num_write = num_write - self.num = ' W%3s '%self.getName().split('-')[-1] + self.num = ' W%3s ' % self.getName().split('-')[-1] def keep_writing_data(self, period, num_write, commit=False): db = SQLiteCacheDB.getInstance() @@ -690,29 +694,29 @@ try: while True: st = time() - if st-begin_time > period: + if st - begin_time > period: break - #db.begin() + # db.begin() values = [] for i in range(num_write): - value = (str(i)+'"'+"'", str(i**2)+'"'+"'") + value = (str(i) + '"' + "'", str(i ** 2) + '"' + "'") values.append(value) try: st = time() if DEBUG: - print '-'+self.num + "start write", self.getName(), self.writes, time()-begin_time + print '-' + self.num + "start write", self.getName(), self.writes, time() - begin_time sys.stdout.flush() sql = 'INSERT INTO person VALUES (?, ?)' db.executemany(sql, values, commit=commit) - self.last_write = time()-begin_time + self.last_write = time() - begin_time - write_time = time()-st + write_time = time() - st w_times.append(write_time) if DEBUG: - print '-'+self.num + "end write", self.getName(), '+', write_time + print '-' + self.num + "end write", self.getName(), '+', write_time sys.stdout.flush() self.writes += 1 except apsw.BusyError: @@ -722,33 +726,33 @@ def keep_writing_data(self, period, num_write, commit=False): s = "Writing/Committing" else: s = "Writing" - print >> sys.stdout, '>'+self.num + "Locked while ", s, self.getName(), self.write_locks, time()-st + print >> sys.stdout, '>' + self.num + "Locked while ", s, self.getName(), self.write_locks, time() - st sys.stdout.flush() continue if SLEEP_W >= 0: - sleep(SLEEP_W/1000.0) + sleep(SLEEP_W / 1000.0) if DO_STH > 0: do_sth(DO_STH) except Exception, msg: print_exc() - print >> sys.stderr, "On Error", time(), begin_time, time()-begin_time, Exception, msg,
self.getName() + print >> sys.stderr, "On Error", time(), begin_time, time() - begin_time, Exception, msg, self.getName() if INFO: avg_w = avg_c = max_w = max_c = min_w = min_c = -1 if len(w_times) > 0: - avg_w = sum(w_times)/len(w_times) + avg_w = sum(w_times) / len(w_times) max_w = max(w_times) min_w = min(w_times) - output = self.num + " # W Locks: %d;"%self.write_locks + " # W: %d;"%self.writes - output += " Time: %.1f;"%self.last_write + ' Min Avg Max W: %.2f %.2f %.2f '%(min_w, avg_w, max_w) + output = self.num + " # W Locks: %d;" % self.write_locks + " # W: %d;" % self.writes + output += " Time: %.1f;" % self.last_write + ' Min Avg Max W: %.2f %.2f %.2f ' % (min_w, avg_w, max_w) self.result = output db.commit() db.commit() - db.commit() # test if it got problem if it is called more than once + db.commit() # test if it got problem if it is called more than once db.close() def run(self): @@ -764,7 +768,7 @@ def do_sth(n=300): l.sort() - def start_testing(nwriters,nreaders,write_period,num_write,read_period, + def start_testing(nwriters, nreaders, write_period, num_write, read_period, db_path, busytimeout, commit): self.create_db(db_path, busytimeout) if INFO: @@ -790,7 +794,7 @@ def start_testing(nwriters,nreaders,write_period,num_write,read_period, r.join() total_rlock += r.read_locks if INFO: - print >> sys.stdout, r.num, "# R Locks: %d;"%r.read_locks, "# R: %d;"%len(r.all), "Last read: %.3f;"%r.last_read, "Read Times:", r.read_times + print >> sys.stdout, r.num, "# R Locks: %d;" % r.read_locks, "# R: %d;" % len(r.all), "Last read: %.3f;" % r.last_read, "Read Times:", r.read_times sys.stdout.flush() del r @@ -805,11 +809,11 @@ def start_testing(nwriters,nreaders,write_period,num_write,read_period, return total_rlock, total_wlock - #sys.setcheckinterval(1) + # sys.setcheckinterval(1) DEBUG_R = False DEBUG = False INFO = False - SLEEP_W = -10 # millisecond. -1 to disable, otherwise indicate how long to sleep + SLEEP_W = -10 # millisecond. -1 to disable, otherwise indicate how long to sleep DO_STH = 0 NLOOPS = 1 total_rlock = total_wlock = 0 @@ -827,19 +831,3 @@ def start_testing(nwriters,nreaders,write_period,num_write,read_period, assert total_rlock == 0 and total_wlock == 0, (total_rlock, total_wlock) assert len(all) > 0, len(all) - -def test_suite(): - suite = unittest.TestSuite() - suite.addTest(unittest.makeSuite(TestSqliteCacheDB)) - suite.addTest(unittest.makeSuite(TestThreadedSqliteCacheDB)) - suite.addTest(unittest.makeSuite(TestSQLitePerformance)) - - return suite - -def main(): - init() - unittest.main(defaultTest='test_suite') - - -if __name__ == '__main__': - main() diff --git a/Tribler/Test/test_sqlitecachedbhandler.bat b/Tribler/Test/test_sqlitecachedbhandler.bat deleted file mode 100644 index 1ae064c21c2..00000000000 --- a/Tribler/Test/test_sqlitecachedbhandler.bat +++ /dev/null @@ -1,49 +0,0 @@ -set PYTHONPATH=..\.. 
- -python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_size -python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getOne -python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getAll - -python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_get -python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_put - -python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_setSuperPeer -python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_addExternalSuperPeer - -python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_size -python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_getFriends -python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_setFriendState -python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_addExternalFriend - -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getList -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerSim -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerList -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeers -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_addPeer -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_aa_hasPeer -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_findPeers -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_deletePeer -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPermIDByIP -python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_loadPeers - -python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_getPrefList -python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPreference -python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPeerPreferences - -python test_sqlitecachedbhandler.py TestTorrentDBHandler singtested_functions -python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_count -python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_loadTorrents -python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_add_update_delete_Torrent -python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_getCollectedTorrentHashes -python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_freeSpace - -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getPrefList -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getCreationTime -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getRecentLivePrefList -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_hasMyPreference -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_addMyPreference_deletePreference -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_updateProgress -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefListInfohash -python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefStats diff --git a/Tribler/Test/test_sqlitecachedbhandler.py b/Tribler/Test/test_sqlitecachedbhandler.py index a7fa5f28563..c8ce0153652 100644 --- a/Tribler/Test/test_sqlitecachedbhandler.py +++ b/Tribler/Test/test_sqlitecachedbhandler.py 
@@ -10,18 +10,16 @@ from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, DEFAULT_BUSY_TIMEOUT, CURRENT_MAIN_DB_VERSION from bak_tribler_sdb import * -CREATE_SQL_FILE = os.path.join('..', "schema_sdb_v" + str(CURRENT_MAIN_DB_VERSION) + ".sql") +CREATE_SQL_FILE = os.path.join('Tribler', "schema_sdb_v" + str(CURRENT_MAIN_DB_VERSION) + ".sql") import Tribler.Core.CacheDB.sqlitecachedb print >> sys.stderr, "TEST: ENABLE DBUPGRADE HACK" Tribler.Core.CacheDB.sqlitecachedb.TEST_SQLITECACHEDB_UPGRADE = True - from Tribler.Core.TorrentDef import TorrentDef from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB, bin2str, str2bin from Tribler.Core.CacheDB.SqliteCacheDBHandler import TorrentDBHandler, MyPreferenceDBHandler, BasicDBHandler, PeerDBHandler from Tribler.Category.Category import Category -from bak_tribler_sdb import * S_TORRENT_PATH_BACKUP = os.path.join(FILES_DIR, 'bak_single.torrent') S_TORRENT_PATH = os.path.join(FILES_DIR, 'single.torrent') @@ -32,67 +30,54 @@ BUSYTIMEOUT = 5000 SHOW_NOT_TESTED_FUNCTIONS = False # Enable this to show the functions not tested yet -def init(): - init_bak_tribler_sdb() - - SQLiteCacheDB.getInstance().initDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT) - TorrentDBHandler.getInstance().register(Category.getInstance('..'), '.') - - -def getFuncs2Test(calss_name): - return filter(lambda s:s != 'lock' and not s.startswith('__') and s not in dir(BasicDBHandler), dir(calss_name)) - SQLiteCacheDB.DEBUG = False - class TestSqliteBasicDBHandler(unittest.TestCase): def setUp(self): - db_path = TRIBLER_DB_PATH + init_bak_tribler_sdb() self.sqlitedb = SQLiteCacheDB.getInstance() - self.sqlitedb.initDB(db_path, busytimeout=BUSYTIMEOUT) + self.sqlitedb.initDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT) + + self.db = BasicDBHandler(self.sqlitedb, 'Peer') def tearDown(self): SQLiteCacheDB.getInstance().close() + SQLiteCacheDB.delInstance() - def singtest_size(self): - table_name = 'Peer' - db = BasicDBHandler(self.sqlitedb, table_name) - size = db.size() + def test_size(self): + size = self.db.size() assert size == 3995, size - def singtest_getOne(self): - table_name = 'Peer' - db = BasicDBHandler(self.sqlitedb, table_name) - - ip = db.getOne('ip', peer_id=1) + def test_getOne(self): + ip = self.db.getOne('ip', peer_id=1) assert ip == '1.1.1.1', ip - pid = db.getOne('peer_id', ip='1.1.1.1') + pid = self.db.getOne('peer_id', ip='1.1.1.1') assert pid == 1, pid - name = db.getOne('name', ip='1.1.1.1', port=1) + name = self.db.getOne('name', ip='1.1.1.1', port=1) assert name == 'Peer 1', name - name = db.getOne('name', ip='68.108.115.221', port=6882) + name = self.db.getOne('name', ip='68.108.115.221', port=6882) assert name == None, name - tid = db.getOne('peer_id', conj='OR', ip='1.1.1.1', name='Peer 1') + tid = self.db.getOne('peer_id', conj='OR', ip='1.1.1.1', name='Peer 1') assert tid == 1, tid - tid = db.getOne('peer_id', conj='OR', ip='1.1.1.1', name='asdfasfasfXXXXXXxx...') + tid = self.db.getOne('peer_id', conj='OR', ip='1.1.1.1', name='asdfasfasfXXXXXXxx...') assert tid == 1, tid - tid = db.getOne('peer_id', conj='OR', ip='1.1.1.123', name='Peer 1') + tid = self.db.getOne('peer_id', conj='OR', ip='1.1.1.123', name='Peer 1') assert tid == 1, tid - lbt = db.getOne('last_buddycast', peer_id=1) + lbt = self.db.getOne('last_buddycast', peer_id=1) assert lbt == 1193379432, lbt - name, ip, lbt = db.getOne(('name', 'ip', 'last_buddycast'), peer_id=1) + name, ip, lbt = self.db.getOne(('name', 'ip', 'last_buddycast'), peer_id=1) assert name == 'Peer 1' and ip == 
'1.1.1.1' and lbt == 1193379432, (name, ip, lbt) - values = db.getOne('*', peer_id=1) + values = self.db.getOne('*', peer_id=1) # 03/02/10 Boudewijn: In contrast to the content of the # database, the similarity value is not 12.537961593122299 but # 0 because it is reset as the database is upgraded. @@ -101,35 +86,32 @@ def singtest_getOne(self): for i in range(len(values)): assert values[i] == results[i], (i, values[i], results[i]) - def singtest_getAll(self): - table_name = 'Peer' - db = BasicDBHandler(self.sqlitedb, table_name) - - ips = db.getAll('ip') + def test_getAll(self): + ips = self.db.getAll('ip') assert len(ips) == 3995, len(ips) - ips = db.getAll('distinct ip') + ips = self.db.getAll('distinct ip') assert len(ips) == 256, len(ips) - ips = db.getAll('ip', "ip like '130.%'") + ips = self.db.getAll('ip', "ip like '130.%'") assert len(ips) == 16, len(ips) - ids = db.getAll('peer_id', 'thumbnail is NULL') + ids = self.db.getAll('peer_id', 'thumbnail is NULL') assert len(ids) == 3995, len(ids) - ips = db.getAll('ip', "ip like '88.%'", port=88, conj='or') + ips = self.db.getAll('ip', "ip like '88.%'", port=88, conj='or') assert len(ips) == 16, len(ips) - ips = db.getAll('ip', "ip like '88.%'", port=88, order_by='ip') + ips = self.db.getAll('ip', "ip like '88.%'", port=88, order_by='ip') assert len(ips) == 1, len(ips) assert ips[0][0] == '88.88.88.88', ips[0] - names = db.getAll('name', "ip like '88.%'", order_by='ip', limit=4, offset=1) + names = self.db.getAll('name', "ip like '88.%'", order_by='ip', limit=4, offset=1) assert len(names) == 4 assert names[2][0] == 'Peer 856', names # select name from Peer where ip like '88.%' and port==7762 order by ip limit 4 offset 3 - ips = db.getAll('count(distinct ip), port', group_by='port') + ips = self.db.getAll('count(distinct ip), port', group_by='port') # select count(distinct ip), port from Peer group by port for nip, port in ips: if port == 6881: @@ -139,22 +121,27 @@ def singtest_getAll(self): class TestSqlitePeerDBHandler(unittest.TestCase): def setUp(self): - db_path = TRIBLER_DB_PATH + init_bak_tribler_sdb() db = SQLiteCacheDB.getInstance() - db.openDB(db_path, busytimeout=BUSYTIMEOUT) + db.openDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT) + self.sp1 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x00\\\xdfXv\xffX\xf2\xfe\x96\xe1_]\xf5\x1b\xb4\x91\x91\xa5I\xf0nl\x81\xd2A\xfb\xb7u)\x01T\xa9*)r\x9b\x81s\xb7j\xd2\xecrSg$;\xc8"7s\xecSF\xd3\x0bgK\x1c' self.sp2 = '0R0\x10\x06\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04\x01\xdb\x80+O\xd9N7`\xfc\xd3\xdd\xf0 \xfdC^\xc9\xd7@\x97\xaa\x91r\x1c\xdeL\xf2n\x9f\x00U\xc1A\xf9Ae?\xd8t}_c\x08\xb3G\xf8g@N! 
\xa0\x90M\xfb\xca\xcfZ@' fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' hp = db.hasPeer(fake_permid_x) assert not hp + self.pdb = PeerDBHandler.getInstance() + def tearDown(self): SQLiteCacheDB.getInstance().close() + SQLiteCacheDB.delInstance() + + PeerDBHandler.delInstance() - def singtest_getList(self): - db = PeerDBHandler.getInstance() - sp1 = db.getPeer(self.sp1) - sp2 = db.getPeer(self.sp2) + def test_getList(self): + sp1 = self.pdb.getPeer(self.sp1) + sp2 = self.pdb.getPeer(self.sp2) assert isinstance(sp1, dict) assert isinstance(sp2, dict) print >> sys.stderr, "singtest_GETLIST SP1", `sp1` @@ -162,11 +149,10 @@ def singtest_getList(self): assert sp1['port'] == 628 assert sp2['port'] == 3287 - def singtest_getPeerSim(self): - db = PeerDBHandler.getInstance() + def test_getPeerSim(self): permid_str = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACPJqLjmKeMNRwkCNKkPH51gjQ5e7u4s2vWv9I/AALXtpf+bFPtY8cyFv6OCzisYDo+brgqOxAtuNZwP' permid = str2bin(permid_str) - sim = db.getPeerSim(permid) + sim = self.pdb.getPeerSim(permid) # 03/02/10 Boudewijn: In contrast to the content of the # database, the similarity value is not 5.82119645394964 but 0 # because it is reset as the database is upgraded. @@ -174,39 +160,36 @@ def singtest_getPeerSim(self): permid_str = 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAAB0XbUrw5b8CrTrMZST1SPyrzjgSzIE6ynALtlZASGAb+figVXRRGpKW6MSal3KnEm1/q0P3JPWrhCE' permid = str2bin(permid_str) - sim = db.getPeerSim(permid) + sim = self.pdb.getPeerSim(permid) assert sim == 0 - def singtest_getPeerList(self): - db = PeerDBHandler.getInstance() - peerlist = db.getPeerList() + def test_getPeerList(self): + peerlist = self.pdb.getPeerList() assert len(peerlist) == 3995 peerlist.sort() assert bin2str(peerlist[345]) == 'MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEACxVRvG/Gr19EAPJru2Z5gjctEzv973/PJCQIua2ATMP6euq+Kf4gYpdKbsB/PWqJnfY/wSKPHHfIByV' - def singtest_getPeers(self): - db = PeerDBHandler.getInstance() - peerlist = db.getPeerList() + def test_getPeers(self): + peerlist = self.pdb.getPeerList() peerlist.sort() pl = peerlist[:10] - peers = db.getPeers(pl, ['permid', 'peer_id', 'ip', 'port', 'name']) + peers = self.pdb.getPeers(pl, ['permid', 'peer_id', 'ip', 'port', 'name']) # for p in peers: print p assert peers[7]['name'] == 'Peer 7' assert peers[8]['name'] == 'Peer 8' assert peers[1]['ip'] == '1.1.1.1' assert peers[3]['peer_id'] == 3 - def singtest_addPeer(self): - db = PeerDBHandler.getInstance() + def test_addPeer(self): fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x', 'last_seen':12345} - oldsize = db.size() - db.addPeer(fake_permid_x, peer_x) - assert db.size() == oldsize + 1, (db.size(), oldsize + 1) + oldsize = self.pdb.size() + self.pdb.addPeer(fake_permid_x, peer_x) + assert self.pdb.size() == oldsize + 1, (self.pdb.size(), oldsize + 1) # db.addPeer(fake_permid_x, peer_x) # assert db.size() == oldsize+1 - p = db.getPeer(fake_permid_x) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '1.2.3.4' assert p['port'] == 234 assert p['name'] == 'fake peer x' @@ -219,8 +202,8 @@ def singtest_addPeer(self): peer_x['ip'] = '4.3.2.1' peer_x['port'] = 432 peer_x['last_seen'] = 1234567 - db.addPeer(fake_permid_x, peer_x, update_dns=False) - p = db.getPeer(fake_permid_x) + self.pdb.addPeer(fake_permid_x, peer_x, update_dns=False) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '1.2.3.4' assert 
p['port'] == 234 assert p['last_seen'] == 1234567, p['last_seen'] @@ -228,16 +211,16 @@ def singtest_addPeer(self): peer_x['ip'] = '4.3.2.1' peer_x['port'] = 432 peer_x['last_seen'] = 12345 - db.addPeer(fake_permid_x, peer_x, update_dns=True) - p = db.getPeer(fake_permid_x) + self.pdb.addPeer(fake_permid_x, peer_x, update_dns=True) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '4.3.2.1' assert p['port'] == 432 assert p['last_seen'] == 12345 peer_x['ip'] = '1.2.3.1' peer_x['port'] = 234 - db.addPeer(fake_permid_x, peer_x, update_dns=False) - p = db.getPeer(fake_permid_x) + self.pdb.addPeer(fake_permid_x, peer_x, update_dns=False) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '4.3.2.1' assert p['port'] == 432 assert p['last_seen'] == 12345 @@ -245,239 +228,197 @@ def singtest_addPeer(self): peer_x['ip'] = '1.2.3.4' peer_x['port'] = 234 peer_x['last_seen'] = 1234569 - db.addPeer(fake_permid_x, peer_x, update_dns=True) - p = db.getPeer(fake_permid_x) + self.pdb.addPeer(fake_permid_x, peer_x, update_dns=True) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '1.2.3.4' assert p['port'] == 234 assert p['last_seen'] == 1234569 peer_x['ip'] = '1.2.3.5' peer_x['port'] = 236 - db.addPeer(fake_permid_x, peer_x, update_dns=True) - p = db.getPeer(fake_permid_x) + self.pdb.addPeer(fake_permid_x, peer_x, update_dns=True) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '1.2.3.5' assert p['port'] == 236 - db._db.deletePeer(fake_permid_x, force=True) - p = db.getPeer(fake_permid_x) + self.pdb._db.deletePeer(fake_permid_x, force=True) + p = self.pdb.getPeer(fake_permid_x) assert p == None - assert db.size() == oldsize + assert self.pdb.size() == oldsize - def singtest_aa_hasPeer(self): - db = PeerDBHandler.getInstance() - assert db.hasPeer(self.sp1) - assert db.hasPeer(self.sp2) + def test_aa_hasPeer(self): + assert self.pdb.hasPeer(self.sp1) + assert self.pdb.hasPeer(self.sp2) fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' - assert not db.hasPeer(fake_permid_x) + assert not self.pdb.hasPeer(fake_permid_x) - def singtest_findPeers(self): - db = PeerDBHandler.getInstance() - find_list = db.findPeers('ip', '88.88.88.88') + def test_findPeers(self): + find_list = self.pdb.findPeers('ip', '88.88.88.88') assert len(find_list) == 16 - find_list = db.findPeers('ip', '1.2.3.4') + find_list = self.pdb.findPeers('ip', '1.2.3.4') assert len(find_list) == 0 - db = PeerDBHandler.getInstance() - find_list = db.findPeers('permid', self.sp1) + self.pdb = PeerDBHandler.getInstance() + find_list = self.pdb.findPeers('permid', self.sp1) assert len(find_list) == 1 and find_list[0]['permid'] == self.sp1 - # assert len(find_list) == 3 and 901 in find_list - def singtest_updatePeer(self): - db = PeerDBHandler.getInstance() + def test_updatePeer(self): fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x', 'last_seen':12345} - oldsize = db.size() - db.addPeer(fake_permid_x, peer_x) - assert db.size() == oldsize + 1, (db.size(), oldsize + 1) - p = db.getPeer(fake_permid_x) + oldsize = self.pdb.size() + self.pdb.addPeer(fake_permid_x, peer_x) + assert self.pdb.size() == oldsize + 1, (self.pdb.size(), oldsize + 1) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '1.2.3.4' assert p['port'] == 234 assert p['name'] == 'fake peer x' - db.updatePeer(fake_permid_x, ip='4.3.2.1') - 
db.updatePeer(fake_permid_x, port=432) - db.updatePeer(fake_permid_x, last_seen=1234567) - p = db.getPeer(fake_permid_x) + self.pdb.updatePeer(fake_permid_x, ip='4.3.2.1') + self.pdb.updatePeer(fake_permid_x, port=432) + self.pdb.updatePeer(fake_permid_x, last_seen=1234567) + p = self.pdb.getPeer(fake_permid_x) assert p['ip'] == '4.3.2.1' assert p['port'] == 432 assert p['last_seen'] == 1234567 - db._db.deletePeer(fake_permid_x, force=True) - p = db.getPeer(fake_permid_x) + self.pdb._db.deletePeer(fake_permid_x, force=True) + p = self.pdb.getPeer(fake_permid_x) assert p == None - assert db.size() == oldsize + assert self.pdb.size() == oldsize - def singtest_deletePeer(self): - db = PeerDBHandler.getInstance() + def test_deletePeer(self): fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x', 'last_seen':12345, 'friend':1, 'superpeer':0} - oldsize = db.size() - p = db.getPeer(fake_permid_x) + oldsize = self.pdb.size() + p = self.pdb.getPeer(fake_permid_x) assert p == None, p - db.addPeer(fake_permid_x, peer_x) - assert db.size() == oldsize + 1, (db.size(), oldsize + 1) - assert db.hasPeer(fake_permid_x) - p = db.getPeer(fake_permid_x) + self.pdb.addPeer(fake_permid_x, peer_x) + assert self.pdb.size() == oldsize + 1, (self.pdb.size(), oldsize + 1) + assert self.pdb.hasPeer(fake_permid_x) + p = self.pdb.getPeer(fake_permid_x) assert p != None - db.deletePeer(fake_permid_x, force=False) - assert db.hasPeer(fake_permid_x) + self.pdb.deletePeer(fake_permid_x, force=False) + assert self.pdb.hasPeer(fake_permid_x) - db.deletePeer(fake_permid_x, force=True) - assert db.size() == oldsize - assert not db.hasPeer(fake_permid_x) + self.pdb.deletePeer(fake_permid_x, force=True) + assert self.pdb.size() == oldsize + assert not self.pdb.hasPeer(fake_permid_x) - p = db.getPeer(fake_permid_x) + p = self.pdb.getPeer(fake_permid_x) assert p == None - db.deletePeer(fake_permid_x, force=True) - assert db.size() == oldsize + self.pdb.deletePeer(fake_permid_x, force=True) + assert self.pdb.size() == oldsize - p = db.getPeer(fake_permid_x) + p = self.pdb.getPeer(fake_permid_x) assert p == None, p - db.deletePeer(fake_permid_x, force=True) - assert db.size() == oldsize + self.pdb.deletePeer(fake_permid_x, force=True) + assert self.pdb.size() == oldsize - def singtest_updateTimes(self): - db = PeerDBHandler.getInstance() + def test_updateTimes(self): fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x', 'last_seen':12345, 'connected_times':3} - oldsize = db.size() - p = db.getPeer(fake_permid_x) + oldsize = self.pdb.size() + p = self.pdb.getPeer(fake_permid_x) assert p == None, p - db.addPeer(fake_permid_x, peer_x) - assert db.hasPeer(fake_permid_x) - assert db.size() == oldsize + 1, (db.size(), oldsize + 1) + self.pdb.addPeer(fake_permid_x, peer_x) + assert self.pdb.hasPeer(fake_permid_x) + assert self.pdb.size() == oldsize + 1, (self.pdb.size(), oldsize + 1) - db.updateTimes(fake_permid_x, 'connected_times') + self.pdb.updateTimes(fake_permid_x, 'connected_times') sql = 'select connected_times from Peer where permid=' + repr(bin2str(fake_permid_x)) - ct = db._db.fetchone(sql) + ct = self.pdb._db.fetchone(sql) assert ct == 4, ct - db.updateTimes(fake_permid_x, 'buddycast_times') + self.pdb.updateTimes(fake_permid_x, 'buddycast_times') sql = 
'select buddycast_times from Peer where permid=' + repr(bin2str(fake_permid_x)) - ct = db._db.fetchone(sql) + ct = self.pdb._db.fetchone(sql) assert ct == 1, ct - db.updateTimes(fake_permid_x, 'buddycast_times', 3) + self.pdb.updateTimes(fake_permid_x, 'buddycast_times', 3) sql = 'select buddycast_times from Peer where permid=' + repr(bin2str(fake_permid_x)) - ct = db._db.fetchone(sql) + ct = self.pdb._db.fetchone(sql) assert ct == 4, ct - db.deletePeer(fake_permid_x, force=True) - assert not db.hasPeer(fake_permid_x) + self.pdb.deletePeer(fake_permid_x, force=True) + assert not self.pdb.hasPeer(fake_permid_x) - def singtest_getPermIDByIP(self): - db = PeerDBHandler.getInstance() + def test_getPermIDByIP(self): fake_permid_x = 'fake_permid_x' + '0R0\x10\x00\x07*\x86H\xce=\x02\x01\x06\x05+\x81\x04\x00\x1a\x03>\x00\x04' peer_x = {'permid':fake_permid_x, 'ip':'1.2.3.4', 'port':234, 'name':'fake peer x', 'last_seen':12345, 'connected_times':3} - oldsize = db.size() - p = db.getPeer(fake_permid_x) + oldsize = self.pdb.size() + p = self.pdb.getPeer(fake_permid_x) assert p == None, p - db.addPeer(fake_permid_x, peer_x) - assert db.hasPeer(fake_permid_x) - assert db.size() == oldsize + 1, (db.size(), oldsize + 1) + self.pdb.addPeer(fake_permid_x, peer_x) + assert self.pdb.hasPeer(fake_permid_x) + assert self.pdb.size() == oldsize + 1, (self.pdb.size(), oldsize + 1) - permid = db.getPermIDByIP('1.2.3.4') + permid = self.pdb.getPermIDByIP('1.2.3.4') assert bin2str(permid) == bin2str(fake_permid_x) - db.deletePeer(fake_permid_x, force=True) - assert not db.hasPeer(fake_permid_x) - assert db.size() == oldsize + self.pdb.deletePeer(fake_permid_x, force=True) + assert not self.pdb.hasPeer(fake_permid_x) + assert self.pdb.size() == oldsize - def singtest_loadPeers(self): - db = PeerDBHandler.getInstance() - peer_size = db.size() - res = db.getGUIPeers() + def test_loadPeers(self): + peer_size = self.pdb.size() + res = self.pdb.getGUIPeers() assert len(res) == 1477, len(res) data = res[0] - p = db.getPeer(data['permid']) + p = self.pdb.getPeer(data['permid']) assert p['name'] == data['name'] assert 70 < len(data['permid']) < 90 # must be binary -##-------------------------------------------------------------------------------------------------------------------------- class TestTorrentDBHandler(unittest.TestCase): def setUp(self): - db_path = TRIBLER_DB_PATH + init_bak_tribler_sdb() db = SQLiteCacheDB.getInstance() - db.openDB(db_path, busytimeout=BUSYTIMEOUT) + db.openDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT) + + self.tdb = TorrentDBHandler.getInstance() def tearDown(self): SQLiteCacheDB.getInstance().close() + SQLiteCacheDB.delInstance() + + TorrentDBHandler.delInstance() - def singtested_functions(self): - if SHOW_NOT_TESTED_FUNCTIONS: - all_funcs = getFuncs2Test(TorrentDBHandler) - tested_funcs = [ - "register", - "getInstance", - "hasTorrent", - "hasMetaData", - "getNumberTorrents", "_getCategoryID", - "getTorrents", - "size", - "getTorrentID", - "_addTorrentToDB", "_addTorrentTracker", - "getOne", - "getTracker", - "updateTorrent", - "updateTorrentRelevance", - "deleteTorrent", "_deleteTorrent", "eraseTorrentFile", - "getNumberCollectedTorrents", - "getTorrent", - "freeSpace", - "getInfohash", - ] - for func in all_funcs: - if func not in tested_funcs: - print "TestTorrentDBHandler: not test", func - -# def singtest_misc(self): -# db = TorrentDBHandler.getInstance() - - def _test_hasTorrent(self): + def test_hasTorrent(self): infohash_str = 'AA8cTG7ZuPsyblbRE7CyxsrKUCg=' infohash = 
str2bin(infohash_str) - db = TorrentDBHandler.getInstance() - assert db.hasTorrent(infohash) == True - assert db.hasMetaData(infohash) == True + assert self.tdb.hasTorrent(infohash) == True + assert self.tdb.hasMetaData(infohash) == True fake_infoahsh = 'fake_infohash_1' + '0R0\x10\x00\x07*\x86H\xce=\x02' - assert db.hasTorrent(fake_infoahsh) == False - assert db.hasMetaData(fake_infoahsh) == False + assert self.tdb.hasTorrent(fake_infoahsh) == False + assert self.tdb.hasMetaData(fake_infoahsh) == False - def singtest_count(self): - db = TorrentDBHandler.getInstance() - start = time() - num = db.getNumberTorrents() + def test_count(self): + num = self.tdb.getNumberTorrents() assert num == 4483 - def singtest_loadTorrents(self): - db = TorrentDBHandler.getInstance() - torrent_size = db._db.size('CollectedTorrent') - db2 = MyPreferenceDBHandler.getInstance() - mypref_size = db2.size() - res = db.getTorrents() - # ## assert len(res) == torrent_size - mypref_size, (len(res), torrent_size - mypref_size) - res = db.getTorrents() - len(res) == torrent_size + def test_loadTorrents(self): + torrent_size = self.tdb._db.size('CollectedTorrent') + res = self.tdb.getTorrents() + assert len(res) == torrent_size data = res[0] # print data - assert data['category'][0] in db.category_table.keys(), data['category'] - assert data['status'] in db.status_table.keys(), data['status'] - assert data['source'] in db.src_table.keys(), data['source'] + assert data['category'][0] in self.tdb.category_table.keys(), data['category'] + assert data['status'] in self.tdb.status_table.keys(), data['status'] + assert data['source'] in self.tdb.src_table.keys(), data['source'] assert len(data['infohash']) == 20 - def singtest_add_update_delete_Torrent(self): + def test_add_update_delete_Torrent(self): self.addTorrent() self.updateTorrent() self.deleteTorrent() @@ -486,17 +427,15 @@ def addTorrent(self): copyFile(S_TORRENT_PATH_BACKUP, S_TORRENT_PATH) copyFile(M_TORRENT_PATH_BACKUP, M_TORRENT_PATH) - db = TorrentDBHandler.getInstance() - - old_size = db.size() - old_src_size = db._db.size('TorrentSource') - old_tracker_size = db._db.size('TorrentTracker') + old_size = self.tdb.size() + old_src_size = self.tdb._db.size('TorrentSource') + old_tracker_size = self.tdb._db.size('TorrentTracker') s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09') m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98') - sid = db._db.getTorrentID(s_infohash) - mid = db._db.getTorrentID(m_infohash) + sid = self.tdb._db.getTorrentID(s_infohash) + mid = self.tdb._db.getTorrentID(m_infohash) single_torrent_file_path = os.path.join(FILES_DIR, 'single.torrent') multiple_torrent_file_path = os.path.join(FILES_DIR, 'multiple.torrent') @@ -507,54 +446,54 @@ def addTorrent(self): multiple_tdef = TorrentDef.load(multiple_torrent_file_path) assert m_infohash == multiple_tdef.get_infohash() - db.addExternalTorrent(single_tdef, extra_info={'filename':single_torrent_file_path}) - db.addExternalTorrent(multiple_tdef, source=src, extra_info={'filename':multiple_torrent_file_path}) + self.tdb.addExternalTorrent(single_tdef, extra_info={'filename':single_torrent_file_path}) + self.tdb.addExternalTorrent(multiple_tdef, source=src, extra_info={'filename':multiple_torrent_file_path}) - single_torrent_id = db._db.getTorrentID(s_infohash) - multiple_torrent_id = db._db.getTorrentID(m_infohash) + single_torrent_id = self.tdb._db.getTorrentID(s_infohash) + multiple_torrent_id = self.tdb._db.getTorrentID(m_infohash) - assert 
db.getInfohash(single_torrent_id) == s_infohash
+        assert self.tdb.getInfohash(single_torrent_id) == s_infohash
         single_name = 'Tribler_4.1.7_src.zip'
         multiple_name = 'Tribler_4.1.7_src'
-        assert db.size() == old_size + 2, old_size - db.size()
-        assert old_src_size + 1 == db._db.size('TorrentSource')
-        assert old_tracker_size + 2 == db._db.size('TorrentTracker'), db._db.size('TorrentTracker') - old_tracker_size
+        assert self.tdb.size() == old_size + 2, old_size - self.tdb.size()
+        assert old_src_size + 1 == self.tdb._db.size('TorrentSource')
+        assert old_tracker_size + 2 == self.tdb._db.size('TorrentTracker'), self.tdb._db.size('TorrentTracker') - old_tracker_size

-        sname = db.getOne('name', torrent_id=single_torrent_id)
+        sname = self.tdb.getOne('name', torrent_id=single_torrent_id)
         assert sname == single_name, (sname, single_name)
-        mname = db.getOne('name', torrent_id=multiple_torrent_id)
+        mname = self.tdb.getOne('name', torrent_id=multiple_torrent_id)
         assert mname == multiple_name, (mname, multiple_name)

-        s_size = db.getOne('length', torrent_id=single_torrent_id)
+        s_size = self.tdb.getOne('length', torrent_id=single_torrent_id)
         assert s_size == 1583233, s_size
-        m_size = db.getOne('length', torrent_id=multiple_torrent_id)
+        m_size = self.tdb.getOne('length', torrent_id=multiple_torrent_id)
         assert m_size == 5358560, m_size

-        cat = db.getOne('category_id', torrent_id=multiple_torrent_id)
+        cat = self.tdb.getOne('category_id', torrent_id=multiple_torrent_id)
         assert cat == 8, cat  # other

-        sid = db._db.getOne('TorrentSource', 'source_id', name=src)
+        sid = self.tdb._db.getOne('TorrentSource', 'source_id', name=src)
         assert sid > 1
-        m_sid = db.getOne('source_id', torrent_id=multiple_torrent_id)
+        m_sid = self.tdb.getOne('source_id', torrent_id=multiple_torrent_id)
         assert sid == m_sid
-        s_sid = db.getOne('source_id', torrent_id=single_torrent_id)
+        s_sid = self.tdb.getOne('source_id', torrent_id=single_torrent_id)
         assert 1 == s_sid

-        s_status = db.getOne('status_id', torrent_id=single_torrent_id)
+        s_status = self.tdb.getOne('status_id', torrent_id=single_torrent_id)
         assert s_status == 0

-        m_comment = db.getOne('comment', torrent_id=multiple_torrent_id)
+        m_comment = self.tdb.getOne('comment', torrent_id=multiple_torrent_id)
         comments = 'www.tribler.org'
         assert m_comment.find(comments) > -1
         comments = 'something not inside'
         assert m_comment.find(comments) == -1

-        m_trackers = db.getTracker(m_infohash, 0)  # db._db.getAll('TorrentTracker', 'tracker', 'torrent_id=%d'%multiple_torrent_id)
+        m_trackers = self.tdb.getTracker(m_infohash, 0)  # self.tdb._db.getAll('TorrentTracker', 'tracker', 'torrent_id=%d' % multiple_torrent_id)
         assert len(m_trackers) == 1
         assert ('http://tpb.tracker.thepiratebay.org/announce', 1) in m_trackers, m_trackers

-        s_torrent = db.getTorrent(s_infohash)
-        m_torrent = db.getTorrent(m_infohash)
+        s_torrent = self.tdb.getTorrent(s_infohash)
+        m_torrent = self.tdb.getTorrent(m_infohash)
         assert s_torrent['name'] == 'Tribler_4.1.7_src.zip', s_torrent['name']
         assert m_torrent['name'] == 'Tribler_4.1.7_src', m_torrent['name']
         assert m_torrent['last_check_time'] == 0
@@ -562,193 +501,162 @@ def addTorrent(self):
         assert len(m_torrent) == 16

     def updateTorrent(self):
-        db = TorrentDBHandler.getInstance()
-
         s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
         m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')
         kw = {}

-        db.updateTorrent(m_infohash, relevance=3.1415926, category=['Videoclips'],
+        self.tdb.updateTorrent(m_infohash, relevance=3.1415926, category=['Videoclips'],
                          status='good', progress=23.5, seeder=123, leecher=321,
                          last_check_time=1234567, ignore_number=1, retry_number=2,
                          other_key1='abcd', other_key2=123)
-        multiple_torrent_id = db._db.getTorrentID(m_infohash)
-        res_r = db.getOne('relevance', torrent_id=multiple_torrent_id)
+        multiple_torrent_id = self.tdb._db.getTorrentID(m_infohash)
+        res_r = self.tdb.getOne('relevance', torrent_id=multiple_torrent_id)
         # ## assert 3.1415926 == res_r
-        db.updateTorrentRelevance(m_infohash, 1.41421)
-        res_r = db.getOne('relevance', torrent_id=multiple_torrent_id)
+        self.tdb.updateTorrentRelevance(m_infohash, 1.41421)
+        res_r = self.tdb.getOne('relevance', torrent_id=multiple_torrent_id)
         # ## assert 1.41421 == res_r
-        cid = db.getOne('category_id', torrent_id=multiple_torrent_id)
+        cid = self.tdb.getOne('category_id', torrent_id=multiple_torrent_id)
         # ## assert cid == 2, cid
-        sid = db.getOne('status_id', torrent_id=multiple_torrent_id)
+        sid = self.tdb.getOne('status_id', torrent_id=multiple_torrent_id)
         assert sid == 1
-        p = db.mypref_db.getOne('progress', torrent_id=multiple_torrent_id)
+        p = self.tdb.mypref_db.getOne('progress', torrent_id=multiple_torrent_id)
         assert p == None, p
-        seeder = db.getOne('num_seeders', torrent_id=multiple_torrent_id)
+        seeder = self.tdb.getOne('num_seeders', torrent_id=multiple_torrent_id)
         assert seeder == 123
-        leecher = db.getOne('num_leechers', torrent_id=multiple_torrent_id)
+        leecher = self.tdb.getOne('num_leechers', torrent_id=multiple_torrent_id)
         assert leecher == 321
-        last_check_time = db._db.getOne('TorrentTracker', 'last_check', announce_tier=1, torrent_id=multiple_torrent_id)
+        last_check_time = self.tdb._db.getOne('TorrentTracker', 'last_check', announce_tier=1, torrent_id=multiple_torrent_id)
         assert last_check_time == 1234567, last_check_time
-        ignore_number = db._db.getOne('TorrentTracker', 'ignored_times', announce_tier=1, torrent_id=multiple_torrent_id)
+        ignore_number = self.tdb._db.getOne('TorrentTracker', 'ignored_times', announce_tier=1, torrent_id=multiple_torrent_id)
         assert ignore_number == 1
-        retry_number = db._db.getOne('TorrentTracker', 'retried_times', announce_tier=1, torrent_id=multiple_torrent_id)
+        retry_number = self.tdb._db.getOne('TorrentTracker', 'retried_times', announce_tier=1, torrent_id=multiple_torrent_id)
         assert retry_number == 2

     def deleteTorrent(self):
-        db = TorrentDBHandler.getInstance()
-        db.torrent_dir = FILES_DIR
+        self.tdb.torrent_dir = FILES_DIR
         s_infohash = unhexlify('44865489ac16e2f34ea0cd3043cfd970cc24ec09')
         m_infohash = unhexlify('ed81da94d21ad1b305133f2726cdaec5a57fed98')

-        assert db.deleteTorrent(s_infohash, delete_file=True)
-        assert db.deleteTorrent(m_infohash)
+        assert self.tdb.deleteTorrent(s_infohash, delete_file=True)
+        assert self.tdb.deleteTorrent(m_infohash)

-        assert not db.hasTorrent(s_infohash)
-        assert not db.hasTorrent(m_infohash)
+        assert not self.tdb.hasTorrent(s_infohash)
+        assert not self.tdb.hasTorrent(m_infohash)
         assert not os.path.isfile(S_TORRENT_PATH)
-        m_trackers = db.getTracker(m_infohash, 0)
+        m_trackers = self.tdb.getTracker(m_infohash, 0)
         assert len(m_trackers) == 0

         # fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
         # 02/02/10 Boudewijn: infohashes must be 20 bytes long
         fake_infoahsh = 'fake_infohash_1' + '0R0\x10\x00'
-        assert not db.deleteTorrent(fake_infoahsh)
+        assert not self.tdb.deleteTorrent(fake_infoahsh)

         my_infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
         my_infohash = str2bin(my_infohash_str_126)
-        assert not db.deleteTorrent(my_infohash)
+        assert not self.tdb.deleteTorrent(my_infohash)

-    def singtest_getCollectedTorrentHashes(self):
-        db = TorrentDBHandler.getInstance()
-        res = db.getNumberCollectedTorrents()
+    def test_getCollectedTorrentHashes(self):
+        res = self.tdb.getNumberCollectedTorrents()
         assert res == 4848, res

-    def singtest_freeSpace(self):
-        db = TorrentDBHandler.getInstance()
-        old_res = db.getNumberCollectedTorrents()
-        db.freeSpace(20)
-        res = db.getNumberCollectedTorrents()
+    def test_freeSpace(self):
+        old_res = self.tdb.getNumberCollectedTorrents()
+        self.tdb.freeSpace(20)
+        res = self.tdb.getNumberCollectedTorrents()
         assert old_res - res == 20

-init()
-
 class TestMyPreferenceDBHandler(unittest.TestCase):

     def setUp(self):
-        db_path = TRIBLER_DB_PATH
+        init_bak_tribler_sdb()
         db = SQLiteCacheDB.getInstance()
-        db.openDB(db_path, busytimeout=BUSYTIMEOUT)
-        mypref_db = MyPreferenceDBHandler.getInstance()
-        mypref_db.loadData()
+        db.openDB(TRIBLER_DB_PATH, busytimeout=BUSYTIMEOUT)
+
+        self.mdb = MyPreferenceDBHandler.getInstance()
+        self.mdb.loadData()

     def tearDown(self):
         SQLiteCacheDB.getInstance().close()
+        SQLiteCacheDB.delInstance()
+        MyPreferenceDBHandler.delInstance()

-    def singtest_getPrefList(self):
-        db = MyPreferenceDBHandler.getInstance()
-        pl = db.getMyPrefListInfohash()
+    def test_getPrefList(self):
+        pl = self.mdb.getMyPrefListInfohash()
         assert len(pl) == 12

-    def singtest_getCreationTime(self):
-        db = MyPreferenceDBHandler.getInstance()
+    def test_getCreationTime(self):
         infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
         infohash = str2bin(infohash_str_126)
-        ct = db.getCreationTime(infohash)
+        ct = self.mdb.getCreationTime(infohash)
         assert ct == 1194966300, ct

-    def singtest_getRecentLivePrefList(self):
-        db = MyPreferenceDBHandler.getInstance()
-        pl = db.getRecentLivePrefList()
+    def test_getRecentLivePrefList(self):
+        pl = self.mdb.getRecentLivePrefList()
         assert len(pl) == 11, (len(pl), pl)
         infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
         assert bin2str(pl[0]) == infohash_str_126
         infohash_str_1279 = 'R+grUhp884MnFkt6NuLnnauZFsc='
         assert bin2str(pl[1]) == infohash_str_1279

-        pl = db.getRecentLivePrefList(8)
+        pl = self.mdb.getRecentLivePrefList(8)
         assert len(pl) == 8, (len(pl), pl)
         assert bin2str(pl[0]) == infohash_str_126
         assert bin2str(pl[1]) == infohash_str_1279

-    def singtest_hasMyPreference(self):
+    def test_hasMyPreference(self):
         infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
         infohash_str_1279 = 'R+grUhp884MnFkt6NuLnnauZFsc='
-        db = MyPreferenceDBHandler.getInstance()
-        assert db.hasMyPreference(str2bin(infohash_str_126))
-        assert db.hasMyPreference(str2bin(infohash_str_1279))
+        assert self.mdb.hasMyPreference(str2bin(infohash_str_126))
+        assert self.mdb.hasMyPreference(str2bin(infohash_str_1279))
         # fake_infoahsh = 'fake_infohash_1'+'0R0\x10\x00\x07*\x86H\xce=\x02'
         # 02/02/10 Boudewijn: infohashes must be 20 bytes long
         fake_infoahsh = 'fake_infohash_1' + '0R0\x10\x00'
-        assert not db.hasMyPreference(fake_infoahsh)
+        assert not self.mdb.hasMyPreference(fake_infoahsh)

-    def singtest_addMyPreference_deletePreference(self):
-        db = MyPreferenceDBHandler.getInstance()
-        p = db.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
+    def test_addMyPreference_deletePreference(self):
+        p = self.mdb.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
         torrent_id = p[0]
-        infohash = db._db.getInfohash(torrent_id)
+        infohash = self.mdb._db.getInfohash(torrent_id)
         destpath = p[1]
         progress = p[2]
         creation_time = p[3]
-        db.deletePreference(infohash)
-        pl = db.getMyPrefListInfohash()
+        self.mdb.deletePreference(infohash)
+        pl = self.mdb.getMyPrefListInfohash()
         assert len(pl) == 11
         assert infohash not in pl

         data = {'destination_path':destpath}
-        db.addMyPreference(infohash, data)
-        p2 = db.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
+        self.mdb.addMyPreference(infohash, data)
+        p2 = self.mdb.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
         assert p2[0] == p[0] and p2[1] == p[1] and p2[2] == 0 and time() - p2[3] < 10, p2

-        db.deletePreference(infohash)
-        pl = db.getMyPrefListInfohash()
+        self.mdb.deletePreference(infohash)
+        pl = self.mdb.getMyPrefListInfohash()
         assert len(pl) == 11
         assert infohash not in pl

         data = {'destination_path':destpath, 'progress':progress, 'creation_time':creation_time}
-        db.addMyPreference(infohash, data)
-        p3 = db.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
+        self.mdb.addMyPreference(infohash, data)
+        p3 = self.mdb.getOne(('torrent_id', 'destination_path', 'progress', 'creation_time'), torrent_id=126)
         assert p3 == p, p3

-    def singtest_updateProgress(self):
+    def test_updateProgress(self):
         infohash_str_126 = 'ByJho7yj9mWY1ORWgCZykLbU1Xc='
         infohash = str2bin(infohash_str_126)
-        db = MyPreferenceDBHandler.getInstance()
-        assert db.hasMyPreference(infohash)
-        torrent_id = db._db.getTorrentID(infohash)
-        db.updateProgress(infohash, 3.14)
-        p = db.getOne('progress', torrent_id=torrent_id)
+        assert self.mdb.hasMyPreference(infohash)
+        torrent_id = self.mdb._db.getTorrentID(infohash)
+        self.mdb.updateProgress(infohash, 3.14)
+        p = self.mdb.getOne('progress', torrent_id=torrent_id)
         assert p == 3.14

-    def singtest_getMyPrefListInfohash(self):
-        db = MyPreferenceDBHandler.getInstance()
-        preflist = db.getMyPrefListInfohash()
+    def test_getMyPrefListInfohash(self):
+        preflist = self.mdb.getMyPrefListInfohash()
         for p in preflist:
             assert len(p) == 20
         assert len(preflist) == 12

-    def singtest_getMyPrefStats(self):
-        db = MyPreferenceDBHandler.getInstance()
-        res = db.getMyPrefStats()
+    def test_getMyPrefStats(self):
+        res = self.mdb.getMyPrefStats()
         assert len(res) == 12
         for k in res:
             data = res[k]
             assert len(data) == 3
-
-def test_suite():
-    suite = unittest.TestSuite()
-    # We should run the tests in a separate Python interpreter to prevent
-    # problems with our singleton classes, e.g. PeerDB, etc.
-    if len(sys.argv) != 3:
-        print "Usage: python test_so.py <class name> <method name>"
-    else:
-        for class_ in unittest.TestCase.__subclasses__():
-            if class_.__name__ == sys.argv[1]:
-                init()
-                suite.addTest(class_(sys.argv[2]))
-    return suite
-
-def main():
-    unittest.main(defaultTest='test_suite', argv=[sys.argv[0]])
-
-if __name__ == "__main__":
-    main()
diff --git a/Tribler/Test/test_sqlitecachedbhandler.sh b/Tribler/Test/test_sqlitecachedbhandler.sh
deleted file mode 100755
index 78fa433540f..00000000000
--- a/Tribler/Test/test_sqlitecachedbhandler.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/sh -x
-#
-# We should run the tests in a separate Python interpreter to prevent
-# problems with our singleton classes, e.g. SuperPeerDB, etc.
-#
-# WARNING: this shell script must use \n as end-of-line, Windows
-# \r\n gives problems running this on Linux
-
-PYTHONPATH=../..:"$PYTHONPATH"
-export PYTHONPATH
-
-python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_size
-python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getOne
-python test_sqlitecachedbhandler.py TestSqliteBasicDBHandler singtest_getAll
-
-python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_get
-python test_sqlitecachedbhandler.py TestSqliteMyDBHandler singtest_put
-
-python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_setSuperPeer
-python test_sqlitecachedbhandler.py TestSuperPeerDBHandler singtest_addExternalSuperPeer
-
-python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_size
-python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_getFriends
-python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_setFriendState
-python test_sqlitecachedbhandler.py TestFriendDBHandler singtest_addExternalFriend
-
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getList
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerSim
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeerList
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPeers
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_addPeer
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_aa_hasPeer
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_findPeers
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_deletePeer
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_updatePeer
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_getPermIDByIP
-python test_sqlitecachedbhandler.py TestSqlitePeerDBHandler singtest_loadPeers
-
-python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_getPrefList
-python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPreference
-python test_sqlitecachedbhandler.py TestPreferenceDBHandler singtest_addPeerPreferences
-
-python test_sqlitecachedbhandler.py TestTorrentDBHandler singtested_functions
-python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_count
-python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_loadTorrents
-python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_add_update_delete_Torrent
-python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_getCollectedTorrentHashes
-python test_sqlitecachedbhandler.py TestTorrentDBHandler singtest_freeSpace
-
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getPrefList
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getCreationTime
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getRecentLivePrefList
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_hasMyPreference
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_addMyPreference_deletePreference
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_updateProgress
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefListInfohash
-python test_sqlitecachedbhandler.py TestMyPreferenceDBHandler singtest_getMyPrefStats
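
The shell script removed above existed only because the cache-DB handlers are process-wide singletons: running two test methods in the same interpreter let state leak through getInstance(), so each method got its own interpreter. The new tearDown methods instead pair every getInstance() with a delInstance(), which makes plain unittest fixtures sufficient. A minimal self-contained sketch of that contract (Handler is a hypothetical stand-in, not a Tribler class):

    import unittest

    class Handler(object):
        # Toy stand-in for the getInstance()/delInstance() contract of the
        # cache-DB handler singletons (hypothetical class, not Tribler code).
        _single = None

        @classmethod
        def getInstance(cls):
            if cls._single is None:
                cls._single = cls()
            return cls._single

        @classmethod
        def delInstance(cls):
            # Forget the cached instance so the next getInstance() builds a
            # fresh one.
            cls._single = None

    class TestWithReset(unittest.TestCase):

        def setUp(self):
            self.handler = Handler.getInstance()

        def tearDown(self):
            # Without this, the next setUp would silently reuse the same
            # instance and state would leak between tests -- the problem the
            # deleted shell script worked around by forking one interpreter
            # per test method.
            Handler.delInstance()

        def test_fresh_instance(self):
            # Each test sees a newly created singleton, not a leftover.
            assert Handler.getInstance() is self.handler

    if __name__ == "__main__":
        unittest.main()

Resetting the cached class attribute is all delInstance() needs to do; the next getInstance() then constructs a fresh handler against the freshly restored database, exactly as the setUp/tearDown pairs in the diff above do with SQLiteCacheDB and the handler classes.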