Просмотр исходного кода

Merge pull request #4 from jefforeilly/py3

python3 support
Kaifeng Xu 7 лет назад
Родитель
Сommit
9558f8d3f6

+ 63 - 0
.gitignore

@@ -0,0 +1,63 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Python template
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+
+*.iml
+.idea

+ 0 - 34
fdfs_client/client.conf

@@ -1,34 +0,0 @@
-# connect timeout in seconds
-# default value is 30s
-connect_timeout=30
-
-# network timeout in seconds
-# default value is 30s
-network_timeout=60
-
-# the base path to store log files
-base_path=/home/tenma/fastdfs
-
-# tracker_server can ocur more than once, and tracker_server format is
-#  "host:port", host can be hostname or ip address
-tracker_server=127.0.0.1:22122
-#tracker_server=192.168.243.135:22122
-
-#standard log level as syslog, case insensitive, value list:
-### emerg for emergency
-### alert
-### crit for critical
-### error
-### warn for warning
-### notice
-### info
-### debug
-log_level=info
-
-
-#HTTP settings
-http.tracker_server_port=8080
-
-#use "#include" directive to include HTTP other settiongs
-##include http.conf
-

+ 89 - 96
fdfs_client/client.py

@@ -2,19 +2,18 @@
 # -*- coding: utf-8 -*-
 # filename: client.py
 
-'''
+"""
   Client module for Fastdfs 3.08
   author: scott yuan scottzer8@gmail.com
   date: 2012-06-21
-'''
+"""
 
-import os, sys
-from fdfs_client.utils import *
 from fdfs_client.tracker_client import *
 from fdfs_client.storage_client import *
 from fdfs_client.exceptions import *
 
-def get_tracker_conf(conf_path = 'client.conf'):
+
+def get_tracker_conf(conf_path='client.conf'):
     cf = Fdfs_ConfigParser()
     tracker = {}
     try:
@@ -34,15 +33,16 @@ def get_tracker_conf(conf_path = 'client.conf'):
         raise
     return tracker
 
+
 class Fdfs_client(object):
-    '''
+    """
     Class Fdfs_client implemented Fastdfs client protol ver 3.08.
 
     It's useful upload, download, delete file to or from fdfs server, etc. It's uses
     connection pool to manage connection to server.
-    '''
-    def __init__(self, conf_path = '/etc/fdfs/client.conf', \
-                 poolclass =ConnectionPool):
+    """
+
+    def __init__(self, conf_path='/etc/fdfs/client.conf', poolclass=ConnectionPool):
         self.trackers = get_tracker_conf(conf_path)
         self.tracker_pool = poolclass(**self.trackers)
         self.timeout  = self.trackers['timeout']
@@ -64,7 +64,7 @@ class Fdfs_client(object):
         return store
 
     def upload_by_filename(self, filename, meta_dict = None):
-        '''
+        """
         Upload a file to Storage server.
         arguments:
         @filename: string, name of file that will be uploaded
@@ -82,7 +82,7 @@ class Fdfs_client(object):
             'Uploaded size'   : upload_size,
             'Storage IP'      : storage_ip
         } if success else None
-        '''
+        """
         isfile, errmsg = fdfs_check_file(filename)
         if not isfile:
             raise DataError(errmsg + '(uploading)')
@@ -90,7 +90,7 @@ class Fdfs_client(object):
         store_serv = tc.tracker_query_storage_stor_without_group()
         return self.get_storage(store_serv).storage_upload_by_filename(tc, store_serv, filename, meta_dict)
 
-    def upload_by_file(self, filename, meta_dict = None):
+    def upload_by_file(self, filename, meta_dict=None):
         isfile, errmsg = fdfs_check_file(filename)
         if not isfile:
             raise DataError(errmsg + '(uploading)')
@@ -98,8 +98,8 @@ class Fdfs_client(object):
         store_serv = tc.tracker_query_storage_stor_without_group()
         return self.get_storage(store_serv).storage_upload_by_file(tc, store_serv, filename, meta_dict)
 
-    def upload_by_buffer(self, filebuffer, file_ext_name = None, meta_dict = None):
-        '''
+    def upload_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
+        """
         Upload a buffer to Storage server.
         arguments:
         @filebuffer: string, buffer
@@ -118,7 +118,7 @@ class Fdfs_client(object):
             'Uploaded size'   : upload_size,
             'Storage IP'      : storage_ip
         } if success else None
-        '''
+        """
         if not filebuffer:
             raise DataError('[-] Error: argument filebuffer can not be null.')
         tc = Tracker_client(self.tracker_pool)
@@ -127,8 +127,8 @@ class Fdfs_client(object):
                                               file_ext_name, meta_dict)
 
     def upload_slave_by_filename(self, filename, remote_file_id, prefix_name, \
-                                 meta_dict = None):
-        '''
+                                 meta_dict=None):
+        """
         Upload slave file to Storage server.
         arguments:
         @filename: string, local file name
@@ -147,7 +147,7 @@ class Fdfs_client(object):
             'Remote file id'  : remote_file_id,
             'Storage IP'      : storage_ip
         }
-        '''
+        """
         isfile, errmsg = fdfs_check_file(filename)
         if not isfile:
             raise DataError(errmsg + '(uploading slave)')
@@ -162,16 +162,16 @@ class Fdfs_client(object):
         store = self.get_storage(store_serv)
         try:
             ret_dict = store.storage_upload_slave_by_filename(tc, store_serv, filename, \
-                                                          prefix_name, remote_filename, \
-                                                          meta_dict = None)
+                                                              prefix_name, remote_filename, \
+                                                              meta_dict=None)
         except:
             raise
         ret_dict['Status'] = 'Upload slave file successed.'
         return ret_dict
 
     def upload_slave_by_file(self, filename, remote_file_id, prefix_name, \
-                                 meta_dict = None):
-        '''
+                             meta_dict=None):
+        """
         Upload slave file to Storage server.
         arguments:
         @filename: string, local file name
@@ -190,7 +190,7 @@ class Fdfs_client(object):
             'Remote file id'  : remote_file_id,
             'Storage IP'      : storage_ip
         }
-        '''
+        """
         isfile, errmsg = fdfs_check_file(filename)
         if not isfile:
             raise DataError(errmsg + '(uploading slave)')
@@ -206,15 +206,15 @@ class Fdfs_client(object):
         try:
             ret_dict = store.storage_upload_slave_by_file(tc, store_serv, filename, \
                                                           prefix_name, remote_filename, \
-                                                          meta_dict = None)
+                                                          meta_dict=None)
         except:
             raise
         ret_dict['Status'] = 'Upload slave file successed.'
         return ret_dict
 
     def upload_slave_by_buffer(self, filebuffer, remote_file_id, \
-                               meta_dict = None, file_ext_name = None):
-        '''
+                               meta_dict=None, file_ext_name=None):
+        """
         Upload slave file by buffer
         arguments:
         @filebuffer: string
@@ -232,7 +232,7 @@ class Fdfs_client(object):
             'Remote file id'  : remote_file_id,
             'Storage IP'      : storage_ip
         }
-        '''
+        """
         if not filebuffer:
             raise DataError('[-] Error: argument filebuffer can not be null.')
         tmp = split_remote_fileid(remote_file_id)
@@ -245,9 +245,9 @@ class Fdfs_client(object):
         return store.storage_upload_slave_by_buffer(tc, store_serv, filebuffer, \
                                                     remote_filename, meta_dict, \
                                                     file_ext_name)
-            
-    def upload_appender_by_filename(self, local_filename, meta_dict = None):
-        '''
+
+    def upload_appender_by_filename(self, local_filename, meta_dict=None):
+        """
         Upload an appender file by filename.
         arguments:
         @local_filename: string
@@ -265,7 +265,7 @@ class Fdfs_client(object):
             'Uploaded size'   : upload_size,
             'Storage IP'      : storage_ip
         } if success else None
-        '''
+        """
         isfile, errmsg = fdfs_check_file(local_filename)
         if not isfile:
             raise DataError(errmsg + '(uploading appender)')
@@ -275,8 +275,8 @@ class Fdfs_client(object):
         return store.storage_upload_appender_by_filename(tc, store_serv, \
                                                          local_filename, meta_dict)
 
-    def upload_appender_by_file(self, local_filename, meta_dict = None):
-        '''
+    def upload_appender_by_file(self, local_filename, meta_dict=None):
+        """
         Upload an appender file by file.
         arguments:
         @local_filename: string
@@ -294,7 +294,7 @@ class Fdfs_client(object):
             'Uploaded size'   : upload_size,
             'Storage IP'      : storage_ip
         } if success else None
-        '''
+        """
         isfile, errmsg = fdfs_check_file(local_filename)
         if not isfile:
             raise DataError(errmsg + '(uploading appender)')
@@ -302,10 +302,10 @@ class Fdfs_client(object):
         store_serv = tc.tracker_query_storage_stor_without_group()
         store = self.get_storage(store_serv)
         return store.storage_upload_appender_by_file(tc, store_serv, \
-                                                         local_filename, meta_dict)
+                                                     local_filename, meta_dict)
 
-    def upload_appender_by_buffer(self, filebuffer, file_ext_name = None, meta_dict = None):
-        '''
+    def upload_appender_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
+        """
         Upload a buffer to Storage server.
         arguments:
         @filebuffer: string
@@ -319,7 +319,7 @@ class Fdfs_client(object):
             'Uploaded size'   : upload_size,
             'Storage IP'      : storage_ip
         } if success else None
-        '''
+        """
         if not filebuffer:
             raise DataError('[-] Error: argument filebuffer can not be null.')
         tc = Tracker_client(self.tracker_pool)
@@ -330,12 +330,12 @@ class Fdfs_client(object):
                                                        file_ext_name)
 
     def delete_file(self, remote_file_id):
-        '''
+        """
         Delete a file from Storage server.
         arguments:
         @remote_file_id: string, file_id of file that is on storage server
         @return tuple ('Delete file successed.', remote_file_id, storage_ip)
-        '''
+        """
         tmp = split_remote_fileid(remote_file_id)
         if not tmp:
             raise DataError('[-] Error: remote_file_id is invalid.(in delete file)')
@@ -345,8 +345,8 @@ class Fdfs_client(object):
         store = self.get_storage(store_serv)
         return store.storage_delete_file(tc, store_serv, remote_filename)
 
-    def download_to_file(self, local_filename, remote_file_id, offset = 0, down_bytes = 0):
-        '''
+    def download_to_file(self, local_filename, remote_file_id, offset=0, down_bytes=0):
+        """
         Download a file from Storage server.
         arguments:
         @local_filename: string, local name of file 
@@ -359,24 +359,23 @@ class Fdfs_client(object):
             'Download size'   : downloaded_size,
             'Storage IP'      : storage_ip
         }
-        '''
+        """
         tmp = split_remote_fileid(remote_file_id)
         if not tmp:
             raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
         group_name, remote_filename = tmp
         if not offset:
-            file_offset = long(offset)
+            file_offset = offset
         if not down_bytes:
-            download_bytes = long(down_bytes)
+            download_bytes = down_bytes
         tc = Tracker_client(self.tracker_pool)
         store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
-        return store.storage_download_to_file(tc, store_serv, local_filename, \
-                                              file_offset, download_bytes, \
+        return store.storage_download_to_file(tc, store_serv, local_filename, file_offset, download_bytes,
                                               remote_filename)
 
-    def download_to_buffer(self, remote_file_id, offset = 0, down_bytes = 0):
-        '''
+    def download_to_buffer(self, remote_file_id, offset=0, down_bytes=0):
+        """
         Download a file from Storage server and store in buffer.
         arguments:
         @remote_file_id: string, file_id of file that is on storage server
@@ -388,35 +387,35 @@ class Fdfs_client(object):
             'Download size'   : downloaded_size,
             'Storage IP'      : storage_ip
         }
-        '''
+        """
         tmp = split_remote_fileid(remote_file_id)
         if not tmp:
             raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
         group_name, remote_filename = tmp
         if not offset:
-            file_offset = long(offset)
+            file_offset = offset
         if not down_bytes:
-            download_bytes = long(down_bytes)
+            download_bytes = down_bytes
         tc = Tracker_client(self.tracker_pool)
         store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         file_buffer = None
         return store.storage_download_to_buffer(tc, store_serv, file_buffer, \
                                                 file_offset, download_bytes, \
-                                              remote_filename)
-                                              
+                                                remote_filename)
+
     def list_one_group(self, group_name):
-        '''
+        """
         List one group information.
         arguments:
         @group_name: string, group name will be list
         @return Group_info,  instance
-        '''
+        """
         tc = Tracker_client(self.tracker_pool)
         return tc.tracker_list_one_group(group_name)
 
-    def list_servers(self, group_name, storage_ip = None):
-        '''
+    def list_servers(self, group_name, storage_ip=None):
+        """
         List all storage servers information in a group
         arguments:
         @group_name: string
@@ -424,28 +423,28 @@ class Fdfs_client(object):
             'Group name' : group_name,
             'Servers'    : server list,
         }
-        '''
+        """
         tc = Tracker_client(self.tracker_pool)
         return tc.tracker_list_servers(group_name, storage_ip)
 
     def list_all_groups(self):
-        '''
+        """
         List all group information.
         @return dictionary {
             'Groups count' : group_count,
             'Groups'       : list of groups
         }
-        '''
+        """
         tc = Tracker_client(self.tracker_pool)
         return tc.tracker_list_all_groups()
 
     def get_meta_data(self, remote_file_id):
-        '''
+        """
         Get meta data of remote file.
         arguments:
         @remote_fileid: string, remote file id
         @return dictionary, meta data
-        '''
+        """
         tmp = split_remote_fileid(remote_file_id)
         if not tmp:
             raise DataError('[-] Error: remote_file_id is invalid.(in get meta data)')
@@ -455,9 +454,8 @@ class Fdfs_client(object):
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         return store.storage_get_metadata(tc, store_serv, remote_filename)
 
-    def set_meta_data(self, remote_file_id, \
-                      meta_dict, op_flag = STORAGE_SET_METADATA_FLAG_OVERWRITE):
-        '''
+    def set_meta_data(self, remote_file_id, meta_dict, op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
+        """
         Set meta data of remote file.
         arguments:
         @remote_file_id: string
@@ -467,7 +465,7 @@ class Fdfs_client(object):
             'Status'     : status,
             'Storage IP' : storage_ip
         }
-        '''
+        """
         tmp = split_remote_fileid(remote_file_id)
         if not tmp:
             raise DataError('[-] Error: remote_file_id is invalid.(in set meta data)')
@@ -476,17 +474,14 @@ class Fdfs_client(object):
         try:
             store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
             store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
-            status = store.storage_set_metadata(tc, store_serv, \
-                                                remote_filename, meta_dict)
+            status = store.storage_set_metadata(tc, store_serv, remote_filename, meta_dict)
         except (ConnectionError, ResponseError, DataError):
             raise
-        #if status == 2:
+        # if status == 2:
         #    raise DataError('[-] Error: remote file %s is not exist.' % remote_file_id)
         if status != 0:
-            raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
-        ret_dict = {}
-        ret_dict['Status'] = 'Set meta data success.'
-        ret_dict['Storage IP'] = store_serv.ip_addr
+            raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
+        ret_dict = {'Status': 'Set meta data success.', 'Storage IP': store_serv.ip_addr}
         return ret_dict
 
     def append_by_filename(self, local_filename, remote_fileid):
@@ -515,7 +510,7 @@ class Fdfs_client(object):
         store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         return store.storage_append_by_file(tc, store_serv, local_filename, \
-                                                appended_filename)
+                                            appended_filename)
 
     def append_by_buffer(self, file_buffer, remote_fileid):
         if not file_buffer:
@@ -528,11 +523,11 @@ class Fdfs_client(object):
         store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         return store.storage_append_by_buffer(tc, store_serv, file_buffer, \
-                                                appended_filename)
+                                              appended_filename)
 
 
     def truncate_file(self, truncated_filesize, appender_fileid):
-        '''
+        """
         Truncate file in Storage server.
         arguments:
         @truncated_filesize: long
@@ -541,8 +536,8 @@ class Fdfs_client(object):
             'Status'     : 'Truncate successed.',
             'Storage IP' : storage_ip
         }
-        '''
-        trunc_filesize = long(truncated_filesize)
+        """
+        trunc_filesize = truncated_filesize
         tmp = split_remote_fileid(appender_fileid)
         if not tmp:
             raise DataError('[-] Error: appender_fileid is invalid.(truncate)')
@@ -552,9 +547,9 @@ class Fdfs_client(object):
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         return store.storage_truncate_file(tc, store_serv, trunc_filesize, \
                                            appender_filename)
-        
-    def modify_by_filename(self, filename, appender_fileid, offset = 0):
-        '''
+
+    def modify_by_filename(self, filename, appender_fileid, offset=0):
+        """
         Modify a file in Storage server by file.
         arguments:
         @filename: string, local file name
@@ -564,7 +559,7 @@ class Fdfs_client(object):
             'Status'     : 'Modify successed.',
             'Storage IP' : storage_ip
         }
-        '''
+        """
         isfile, errmsg = fdfs_check_file(filename)
         if not isfile:
             raise DataError(errmsg + '(modify)')
@@ -574,7 +569,7 @@ class Fdfs_client(object):
             raise DataError('[-] Error: remote_fileid is invalid.(modify)')
         group_name, appender_filename = tmp
         if not offset:
-            file_offset = long(offset)
+            file_offset = offset
         else:
             file_offset = 0
         tc = Tracker_client(self.tracker_pool)
@@ -583,8 +578,8 @@ class Fdfs_client(object):
         return store.storage_modify_by_filename(tc, store_serv, filename, file_offset, \
                                                 filesize, appender_filename)
 
-    def modify_by_file(self, filename, appender_fileid, offset = 0):
-        '''
+    def modify_by_file(self, filename, appender_fileid, offset=0):
+        """
         Modify a file in Storage server by file.
         arguments:
         @filename: string, local file name
@@ -594,7 +589,7 @@ class Fdfs_client(object):
             'Status'     : 'Modify successed.',
             'Storage IP' : storage_ip
         }
-        '''
+        """
         isfile, errmsg = fdfs_check_file(filename)
         if not isfile:
             raise DataError(errmsg + '(modify)')
@@ -604,17 +599,17 @@ class Fdfs_client(object):
             raise DataError('[-] Error: remote_fileid is invalid.(modify)')
         group_name, appender_filename = tmp
         if not offset:
-            file_offset = long(offset)
+            file_offset = offset
         else:
             file_offset = 0
         tc = Tracker_client(self.tracker_pool)
         store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         return store.storage_modify_by_file(tc, store_serv, filename, file_offset, \
-                                                filesize, appender_filename)
+                                            filesize, appender_filename)
 
-    def modify_by_buffer(self, filebuffer, appender_fileid, offset = 0):
-        '''
+    def modify_by_buffer(self, filebuffer, appender_fileid, offset=0):
+        """
         Modify a file in Storage server by buffer.
         arguments:
         @filebuffer: string, file buffer
@@ -624,7 +619,7 @@ class Fdfs_client(object):
             'Status'     : 'Modify successed.',
             'Storage IP' : storage_ip
         }
-        '''
+        """
         if not filebuffer:
             raise DataError('[-] Error: filebuffer can not be null.(modify)')
         filesize = len(filebuffer)
@@ -633,13 +628,11 @@ class Fdfs_client(object):
             raise DataError('[-] Error: remote_fileid is invalid.(modify)')
         group_name, appender_filename = tmp
         if not offset:
-            file_offset = long(offset)
+            file_offset = offset
         else:
             file_offset = 0
         tc = Tracker_client(self.tracker_pool)
         store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
         store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
         return store.storage_modify_by_buffer(tc, store_serv, filebuffer, file_offset, \
-                                                filesize, appender_filename)
-        
-    
+                                              filesize, appender_filename)

+ 53 - 46
fdfs_client/connection.py

@@ -14,11 +14,12 @@ from fdfs_client.exceptions import (
     ResponseError,
     InvaildResponse,
     DataError
-    )
+)
 
 # start class Connection
 class Connection(object):
-    '''Manage TCP comunication to and from Fastdfs Server.'''
+    """Manage TCP comunication to and from Fastdfs Server."""
+
     def __init__(self, **conn_kwargs):
         self.pid = os.getpid()
         self.host_tuple = conn_kwargs['host_tuple']
@@ -34,42 +35,42 @@ class Connection(object):
             pass
 
     def connect(self):
-        '''Connect to fdfs server.'''
+        """Connect to fdfs server."""
         if self._sock:
             return
         try:
             sock = self._connect()
-        except socket.error, e:
+        except socket.error as e:
             raise ConnectionError(self._errormessage(e))
         self._sock = sock
-        #print '[+] Create a connection success.'
-        #print '\tLocal address is %s:%s.' % self._sock.getsockname()
-        #print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
+        # print '[+] Create a connection success.'
+        # print '\tLocal address is %s:%s.' % self._sock.getsockname()
+        # print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
 
     def sendall(self, msg):
         if not self._sock:
             self.connect()
         self._sock.sendall(msg)
-        
+
     def recv(self, len):
         return self._sock.recv(len)
 
     def _connect(self):
-        '''Create TCP socket. The host is random one of host_tuple.'''
+        """Create TCP socket. The host is random one of host_tuple."""
         self.remote_addr, self.remote_port = random.choice(self.host_tuple)
-        #print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
-        #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        #sock.settimeout(self.timeout)
-        sock = socket.create_connection((self.remote_addr, self.remote_port),self.timeout)
+        # print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
+        # sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        # sock.settimeout(self.timeout)
+        sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
         return sock
 
     def disconnect(self):
-        '''Disconnect from fdfs server.'''
+        """Disconnect from fdfs server."""
         if self._sock is None:
             return
         try:
             self._sock.close()
-        except socket.error, e:
+        except socket.error as e:
             pass
         self._sock = None
 
@@ -78,29 +79,32 @@ class Connection(object):
 
     def _errormessage(self, exception):
         # args for socket.error can either be (errno, "message")
-        # or just "message" '''
+        # or just "message" """
         if len(exception.args) == 1:
             return "[-] Error: connect to %s:%s. %s." % \
-                (self.remote_addr, self.remote_port, exception.args[0])
+                   (self.remote_addr, self.remote_port, exception.args[0])
         else:
             return "[-] Error: %s connect to %s:%s. %s." % \
-                (exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
+                   (exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
+
+
 # end class Connection
 
 # start ConnectionPool
 class ConnectionPool(object):
-    '''Generic Connection Pool'''
-    def __init__(self, name = '', conn_class = Connection,
-                 max_conn = None, **conn_kwargs):
+    """Generic Connection Pool"""
+
+    def __init__(self, name='', conn_class=Connection,
+                 max_conn=None, **conn_kwargs):
         self.pool_name = name
         self.pid = os.getpid()
         self.conn_class = conn_class
-        self.max_conn = max_conn or 2**31
+        self.max_conn = max_conn or 2 ** 31
         self.conn_kwargs = conn_kwargs
         self._conns_created = 0
         self._conns_available = []
         self._conns_inuse = set()
-        #print '[+] Create a connection pool success, name: %s.' % self.pool_name
+        # print '[+] Create a connection pool success, name: %s.' % self.pool_name
 
     def _check_pid(self):
         if self.pid != os.getpid():
@@ -108,7 +112,7 @@ class ConnectionPool(object):
             self.__init__(self.pool_name, self.conn_class, self.max_conn, **self.conn_kwargs)
 
     def make_conn(self):
-        '''Create a new connection.'''
+        """Create a new connection."""
         if self._conns_created >= self.max_conn:
             raise ConnectionError('[-] Error: Too many connections.')
         num_try = 10
@@ -120,27 +124,27 @@ class ConnectionPool(object):
                 conn_instance.connect()
                 self._conns_created += 1
                 break
-            except ConnectionError, e:
-                print e
+            except ConnectionError as e:
+                print(e)
                 num_try -= 1
                 conn_instance = None
         return conn_instance
 
     def get_connection(self):
-        '''Get a connection from pool.'''
+        """Get a connection from pool."""
         self._check_pid()
         try:
             conn = self._conns_available.pop()
-            #print '[+] Get a connection from pool %s.' % self.pool_name
-            #print '\tLocal address is %s:%s.' % conn._sock.getsockname()
-            #print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
+            # print '[+] Get a connection from pool %s.' % self.pool_name
+            # print '\tLocal address is %s:%s.' % conn._sock.getsockname()
+            # print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
         except IndexError:
             conn = self.make_conn()
         self._conns_inuse.add(conn)
         return conn
 
     def remove(self, conn):
-        '''Remove connection from pool.'''
+        """Remove connection from pool."""
         if conn in self._conns_inuse:
             self._conns_inuse.remove(conn)
             self._conns_created -= 1
@@ -149,30 +153,32 @@ class ConnectionPool(object):
             self._conns_created -= 1
 
     def destroy(self):
-        '''Disconnect all connections in the pool.'''
+        """Disconnect all connections in the pool."""
         all_conns = chain(self._conns_inuse, self._conns_available)
         for conn in all_conns:
             conn.disconnect()
-        #print '[-] Destroy connection pool %s.' % self.pool_name
+            # print '[-] Destroy connection pool %s.' % self.pool_name
 
     def release(self, conn):
-        '''Release the connection back to the pool.'''
+        """Release the connection back to the pool."""
         self._check_pid()
         if conn.pid == self.pid:
             self._conns_inuse.remove(conn)
             self._conns_available.append(conn)
-        #print '[-] Release connection back to pool %s.' % self.pool_name
+            # print '[-] Release connection back to pool %s.' % self.pool_name
+
+
 # end ConnectionPool class
 
-def tcp_recv_response(conn, bytes_size, buffer_size = 4096):
-    '''Receive response from server.
+def tcp_recv_response(conn, bytes_size, buffer_size=4096):
+    """Receive response from server.
         It is not include tracker header.
         arguments:
         @conn: connection
         @bytes_size: int, will be received byte_stream size
         @buffer_size: int, receive buffer size
         @Return: tuple,(response, received_size)
-    '''
+    """
     recv_buff = []
     total_size = 0
     try:
@@ -181,21 +187,22 @@ def tcp_recv_response(conn, bytes_size, buffer_size = 4096):
             recv_buff.append(resp)
             total_size += len(resp)
             bytes_size -= len(resp)
-    except (socket.error, socket.timeout), e:
-            raise ConnectionError('[-] Error: while reading from socket: (%s)' \
-                                    % e.args)
-    return (''.join(recv_buff), total_size)
+    except (socket.error, socket.timeout) as e:
+        raise ConnectionError('[-] Error: while reading from socket: (%s)' \
+                              % e.args)
+    return b''.join(recv_buff), total_size
+
 
 def tcp_send_data(conn, bytes_stream):
-    '''Send buffer to server.
+    """Send buffer to server.
         It does not include the tracker header.
         arguments:
         @conn: connection
         @bytes_stream: transmit buffer
         @Return bool
-    '''
+    """
     try:
         conn._sock.sendall(bytes_stream)
-    except (socket.error, socket.timeout), e:
+    except (socket.error, socket.timeout) as e:
         raise ConnectionError('[-] Error: while writting to socket: (%s)' \
-                                % e.args)
+                              % e.args)

+ 1 - 1
fdfs_client/exceptions.py

@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 # filename: exceptions.py
 
-'''Core exceptions raised by fdfs client'''
+"""Core exceptions raised by fdfs client"""
 
 class FDFSError(Exception):
     pass

+ 161 - 156
fdfs_client/fdfs_protol.py

@@ -14,174 +14,178 @@ from fdfs_client.exceptions import (
 
 
 ## define FDFS protocol constants
-TRACKER_PROTO_CMD_STORAGE_JOIN     =         81
-FDFS_PROTO_CMD_QUIT		   =         82
-TRACKER_PROTO_CMD_STORAGE_BEAT     =         83  #storage heart beat
-TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE =84  #report disk usage
-TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG    =   85  #repl new storage servers
-TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ   =   86  #src storage require sync
-TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ     = 87  #dest storage require sync
-TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY       = 88  #sync done notify
-TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT	    = 89  #report src last synced time as dest server
-TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY   = 79 #dest storage query sync src storage server
-TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78  #storage server report it's ip changed
-TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ     = 77  #storage server request storage server's changelog
-TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS     = 76  #report specified storage server status
-TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ	    = 75  #storage server request parameters
-TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74  #storage report trunk free space
-TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID  = 73  #storage report current trunk file id
-TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID   = 72  #storage get current trunk file id
-
-TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61  #start of tracker get system data files
-TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END   = 62  #end of tracker get system data files
-TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE    = 63  #tracker get a system data file
-TRACKER_PROTO_CMD_TRACKER_GET_STATUS          = 64  #tracker get status of other tracker
-TRACKER_PROTO_CMD_TRACKER_PING_LEADER         = 65  #tracker ping leader
-TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER  = 66  #notify next leader to other trackers
-TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER  = 67  #commit next leader to other trackers
-
-TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP		=	90
-TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS	=	91
-TRACKER_PROTO_CMD_SERVER_LIST_STORAGE		=	92
-TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE		=	93
+TRACKER_PROTO_CMD_STORAGE_JOIN = 81
+FDFS_PROTO_CMD_QUIT = 82
+TRACKER_PROTO_CMD_STORAGE_BEAT = 83  # storage heart beat
+TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE = 84  # report disk usage
+TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG = 85  # repl new storage servers
+TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ = 86  # src storage require sync
+TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ = 87  # dest storage require sync
+TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY = 88  # sync done notify
+TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT = 89  # report src last synced time as dest server
+TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY = 79  # dest storage query sync src storage server
+TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78  # storage server report it's ip changed
+TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ = 77  # storage server request storage server's changelog
+TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS = 76  # report specified storage server status
+TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ = 75  # storage server request parameters
+TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74  # storage report trunk free space
+TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID = 73  # storage report current trunk file id
+TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID = 72  # storage get current trunk file id
+
+TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61  # start of tracker get system data files
+TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END = 62  # end of tracker get system data files
+TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE = 63  # tracker get a system data file
+TRACKER_PROTO_CMD_TRACKER_GET_STATUS = 64  # tracker get status of other tracker
+TRACKER_PROTO_CMD_TRACKER_PING_LEADER = 65  # tracker ping leader
+TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER = 66  # notify next leader to other trackers
+TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER = 67  # commit next leader to other trackers
+
+TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP = 90
+TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS = 91
+TRACKER_PROTO_CMD_SERVER_LIST_STORAGE = 92
+TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE = 93
 TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE = 101
-TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE	     =  102
-TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE  	     =  103
-TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE =	104
-TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL	=	105
-TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL =	106
-TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 	107
-TRACKER_PROTO_CMD_RESP			=		100
-FDFS_PROTO_CMD_ACTIVE_TEST		=               111  #active test, tracker and storage both support since V1.28
-
-STORAGE_PROTO_CMD_REPORT_CLIENT_IP	= 9   #ip as tracker client
-STORAGE_PROTO_CMD_UPLOAD_FILE		= 11
-STORAGE_PROTO_CMD_DELETE_FILE		= 12
-STORAGE_PROTO_CMD_SET_METADATA		= 13
-STORAGE_PROTO_CMD_DOWNLOAD_FILE		= 14
-STORAGE_PROTO_CMD_GET_METADATA		= 15
-STORAGE_PROTO_CMD_SYNC_CREATE_FILE	= 16
-STORAGE_PROTO_CMD_SYNC_DELETE_FILE	= 17
-STORAGE_PROTO_CMD_SYNC_UPDATE_FILE	= 18
-STORAGE_PROTO_CMD_SYNC_CREATE_LINK	= 19
-STORAGE_PROTO_CMD_CREATE_LINK		= 20
-STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE	= 21
-STORAGE_PROTO_CMD_QUERY_FILE_INFO	= 22
-STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE	= 23   #create appender file
-STORAGE_PROTO_CMD_APPEND_FILE		= 24   #append file
-STORAGE_PROTO_CMD_SYNC_APPEND_FILE	= 25
-STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG	= 26   #fetch binlog of one store path
-STORAGE_PROTO_CMD_RESP			=       TRACKER_PROTO_CMD_RESP
-STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE    =	STORAGE_PROTO_CMD_UPLOAD_FILE
-
-STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE   	     = 27  #since V3.00
-STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM	     = 28  #since V3.00
-STORAGE_PROTO_CMD_TRUNK_FREE_SPACE	     = 29  #since V3.00
-STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG	     = 30  #since V3.00
-STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE	     = 31  #since V3.07
-STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS  = 32  #since V3.07
-STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33  #since V3.07
-
-STORAGE_PROTO_CMD_MODIFY_FILE		  =  34  #since V3.08
-STORAGE_PROTO_CMD_SYNC_MODIFY_FILE	  =  35  #since V3.08
-STORAGE_PROTO_CMD_TRUNCATE_FILE		  =  36  #since V3.08
-STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE	  =  37  #since V3.08
-
-#for overwrite all old metadata
-STORAGE_SET_METADATA_FLAG_OVERWRITE       =     'O'
-STORAGE_SET_METADATA_FLAG_OVERWRITE_STR   =	"O"
-#for replace, insert when the meta item not exist, otherwise update it
-STORAGE_SET_METADATA_FLAG_MERGE	          =	'M'
-STORAGE_SET_METADATA_FLAG_MERGE_STR       =	"M"
-
-FDFS_RECORD_SEPERATOR      =	'\x01'
-FDFS_FIELD_SEPERATOR       =	'\x02'
-
-#common constants
-FDFS_STORAGE_ID_MAX_SIZE   =    16
-FDFS_GROUP_NAME_MAX_LEN	   =    16
-IP_ADDRESS_SIZE            =    16
-FDFS_PROTO_PKG_LEN_SIZE	   =	8
-FDFS_PROTO_CMD_SIZE	   =	1
-FDFS_PROTO_STATUS_SIZE     =    1
-FDFS_PROTO_IP_PORT_SIZE	   =	(IP_ADDRESS_SIZE + 6)
-FDFS_MAX_SERVERS_EACH_GROUP =	32
-FDFS_MAX_GROUPS		   =    512
-FDFS_MAX_TRACKERS	   =	16
-FDFS_DOMAIN_NAME_MAX_LEN   =    128
-
-FDFS_MAX_META_NAME_LEN	   =	 64
-FDFS_MAX_META_VALUE_LEN	   =	256
-
-FDFS_FILE_PREFIX_MAX_LEN   =	16
-FDFS_LOGIC_FILE_PATH_LEN   =	10
-FDFS_TRUE_FILE_PATH_LEN	   =	 6
-FDFS_FILENAME_BASE64_LENGTH  =   27
-FDFS_TRUNK_FILE_INFO_LEN   =    16
-FDFS_FILE_EXT_NAME_MAX_LEN =    6
-FDFS_SPACE_SIZE_BASE_INDEX =    2   # storage space size based (MB)
-
-FDFS_UPLOAD_BY_BUFFER      =     1
-FDFS_UPLOAD_BY_FILENAME    =     2
-FDFS_UPLOAD_BY_FILE        =     3
-FDFS_DOWNLOAD_TO_BUFFER    =     1
-FDFS_DOWNLOAD_TO_FILE      =     2
-
-FDFS_NORMAL_LOGIC_FILENAME_LENGTH  = (FDFS_LOGIC_FILE_PATH_LEN + \
-		FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
-
-FDFS_TRUNK_FILENAME_LENGTH         = (FDFS_TRUE_FILE_PATH_LEN + \
-                                    FDFS_FILENAME_BASE64_LENGTH + \
-                                    FDFS_TRUNK_FILE_INFO_LEN + \
-                                    1 + FDFS_FILE_EXT_NAME_MAX_LEN)
-FDFS_TRUNK_LOGIC_FILENAME_LENGTH   = (FDFS_TRUNK_FILENAME_LENGTH + \
+TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE = 102
+TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE = 103
+TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE = 104
+TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL = 105
+TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL = 106
+TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 107
+TRACKER_PROTO_CMD_RESP = 100
+FDFS_PROTO_CMD_ACTIVE_TEST = 111  # active test, tracker and storage both support since V1.28
+
+STORAGE_PROTO_CMD_REPORT_CLIENT_IP = 9  # ip as tracker client
+STORAGE_PROTO_CMD_UPLOAD_FILE = 11
+STORAGE_PROTO_CMD_DELETE_FILE = 12
+STORAGE_PROTO_CMD_SET_METADATA = 13
+STORAGE_PROTO_CMD_DOWNLOAD_FILE = 14
+STORAGE_PROTO_CMD_GET_METADATA = 15
+STORAGE_PROTO_CMD_SYNC_CREATE_FILE = 16
+STORAGE_PROTO_CMD_SYNC_DELETE_FILE = 17
+STORAGE_PROTO_CMD_SYNC_UPDATE_FILE = 18
+STORAGE_PROTO_CMD_SYNC_CREATE_LINK = 19
+STORAGE_PROTO_CMD_CREATE_LINK = 20
+STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE = 21
+STORAGE_PROTO_CMD_QUERY_FILE_INFO = 22
+STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE = 23  # create appender file
+STORAGE_PROTO_CMD_APPEND_FILE = 24  # append file
+STORAGE_PROTO_CMD_SYNC_APPEND_FILE = 25
+STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG = 26  # fetch binlog of one store path
+STORAGE_PROTO_CMD_RESP = TRACKER_PROTO_CMD_RESP
+STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE = STORAGE_PROTO_CMD_UPLOAD_FILE
+
+STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE = 27  # since V3.00
+STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM = 28  # since V3.00
+STORAGE_PROTO_CMD_TRUNK_FREE_SPACE = 29  # since V3.00
+STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG = 30  # since V3.00
+STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE = 31  # since V3.07
+STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS = 32  # since V3.07
+STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33  # since V3.07
+
+STORAGE_PROTO_CMD_MODIFY_FILE = 34  # since V3.08
+STORAGE_PROTO_CMD_SYNC_MODIFY_FILE = 35  # since V3.08
+STORAGE_PROTO_CMD_TRUNCATE_FILE = 36  # since V3.08
+STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE = 37  # since V3.08
+
+# for overwrite all old metadata
+STORAGE_SET_METADATA_FLAG_OVERWRITE = 'O'
+STORAGE_SET_METADATA_FLAG_OVERWRITE_STR = "O"
+# for replace, insert when the meta item not exist, otherwise update it
+STORAGE_SET_METADATA_FLAG_MERGE = 'M'
+STORAGE_SET_METADATA_FLAG_MERGE_STR = "M"
+
+FDFS_RECORD_SEPERATOR = b'\x01'
+FDFS_FIELD_SEPERATOR = b'\x02'
+
+# common constants
+FDFS_STORAGE_ID_MAX_SIZE = 16
+FDFS_GROUP_NAME_MAX_LEN = 16
+IP_ADDRESS_SIZE = 16
+FDFS_PROTO_PKG_LEN_SIZE = 8
+FDFS_PROTO_CMD_SIZE = 1
+FDFS_PROTO_STATUS_SIZE = 1
+FDFS_PROTO_IP_PORT_SIZE = (IP_ADDRESS_SIZE + 6)
+FDFS_MAX_SERVERS_EACH_GROUP = 32
+FDFS_MAX_GROUPS = 512
+FDFS_MAX_TRACKERS = 16
+FDFS_DOMAIN_NAME_MAX_LEN = 128
+
+FDFS_MAX_META_NAME_LEN = 64
+FDFS_MAX_META_VALUE_LEN = 256
+
+FDFS_FILE_PREFIX_MAX_LEN = 16
+FDFS_LOGIC_FILE_PATH_LEN = 10
+FDFS_TRUE_FILE_PATH_LEN = 6
+FDFS_FILENAME_BASE64_LENGTH = 27
+FDFS_TRUNK_FILE_INFO_LEN = 16
+FDFS_FILE_EXT_NAME_MAX_LEN = 6
+FDFS_SPACE_SIZE_BASE_INDEX = 2  # storage space size based (MB)
+
+FDFS_UPLOAD_BY_BUFFER = 1
+FDFS_UPLOAD_BY_FILENAME = 2
+FDFS_UPLOAD_BY_FILE = 3
+FDFS_DOWNLOAD_TO_BUFFER = 1
+FDFS_DOWNLOAD_TO_FILE = 2
+
+FDFS_NORMAL_LOGIC_FILENAME_LENGTH = (FDFS_LOGIC_FILE_PATH_LEN + \
+                                     FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
+
+FDFS_TRUNK_FILENAME_LENGTH = (FDFS_TRUE_FILE_PATH_LEN + \
+                              FDFS_FILENAME_BASE64_LENGTH + \
+                              FDFS_TRUNK_FILE_INFO_LEN + \
+                              1 + FDFS_FILE_EXT_NAME_MAX_LEN)
+FDFS_TRUNK_LOGIC_FILENAME_LENGTH = (FDFS_TRUNK_FILENAME_LENGTH + \
                                     (FDFS_LOGIC_FILE_PATH_LEN - \
                                      FDFS_TRUE_FILE_PATH_LEN))
 
-FDFS_VERSION_SIZE	  =	6
-
-TRACKER_QUERY_STORAGE_FETCH_BODY_LEN  = (FDFS_GROUP_NAME_MAX_LEN \
-			+ IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
-TRACKER_QUERY_STORAGE_STORE_BODY_LEN  =	(FDFS_GROUP_NAME_MAX_LEN \
-			+ IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
-#status code, order is important!
-FDFS_STORAGE_STATUS_INIT       =	  0
-FDFS_STORAGE_STATUS_WAIT_SYNC  =	  1
-FDFS_STORAGE_STATUS_SYNCING    =	  2
-FDFS_STORAGE_STATUS_IP_CHANGED =          3
-FDFS_STORAGE_STATUS_DELETED    =	  4
-FDFS_STORAGE_STATUS_OFFLINE    =	  5
-FDFS_STORAGE_STATUS_ONLINE     =	  6
-FDFS_STORAGE_STATUS_ACTIVE     =	  7
-FDFS_STORAGE_STATUS_RECOVERY   =	  9
-FDFS_STORAGE_STATUS_NONE       =	 99
+FDFS_VERSION_SIZE = 6
+
+TRACKER_QUERY_STORAGE_FETCH_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN \
+                                        + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
+TRACKER_QUERY_STORAGE_STORE_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN \
+                                        + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
+# status code, order is important!
+FDFS_STORAGE_STATUS_INIT = 0
+FDFS_STORAGE_STATUS_WAIT_SYNC = 1
+FDFS_STORAGE_STATUS_SYNCING = 2
+FDFS_STORAGE_STATUS_IP_CHANGED = 3
+FDFS_STORAGE_STATUS_DELETED = 4
+FDFS_STORAGE_STATUS_OFFLINE = 5
+FDFS_STORAGE_STATUS_ONLINE = 6
+FDFS_STORAGE_STATUS_ACTIVE = 7
+FDFS_STORAGE_STATUS_RECOVERY = 9
+FDFS_STORAGE_STATUS_NONE = 99
+
 
 class Storage_server(object):
-    '''Class storage server for upload.'''
+    """Class storage server for upload."""
+
     def __init__(self):
         self.ip_addr = None
         self.port = None
         self.group_name = ''
         self.store_path_index = 0
 
+
 # Class tracker_header
 class Tracker_header(object):
-    '''
+    """
     Class for Pack or Unpack tracker header
         struct tracker_header{
             char pkg_len[FDFS_PROTO_PKG_LEN_SIZE],
             char cmd,
             char status,
         }
-    '''
+    """
+
     def __init__(self):
-        self.fmt = '!QBB' # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
+        self.fmt = '!QBB'  # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
         self.st = struct.Struct(self.fmt)
         self.pkg_len = 0
         self.cmd = 0
         self.status = 0
-    
-    def _pack(self, pkg_len = 0, cmd = 0, status = 0):
+
+    def _pack(self, pkg_len=0, cmd=0, status=0):
         return self.st.pack(pkg_len, cmd, status)
 
     def _unpack(self, bytes_stream):
@@ -189,38 +193,39 @@ class Tracker_header(object):
         return True
 
     def header_len(self):
-        return self.st.size 
-    
+        return self.st.size
+
     def send_header(self, conn):
-        '''Send Tracker header to server.'''
+        """Send Tracker header to server."""
         header = self._pack(self.pkg_len, self.cmd, self.status)
         try:
             conn.sendall(header)
-        except (socket.error, socket.timeout), e:
+        except (socket.error, socket.timeout) as e:
             raise ConnectionError('[-] Error: while writting to socket: %s' \
                                   % (e.args,))
-        
+
     def recv_header(self, conn):
-        '''Receive response from server.
+        """Receive response from server.
           if success, class members (pkg_len, cmd, status) hold the response.
-        '''
+        """
         try:
             header = conn.recv(self.header_len())
-        except (socket.error, socket.timeout), e:
+        except (socket.error, socket.timeout) as e:
             raise ConnectionError('[-] Error: while reading from socket: %s' \
                                   % (e.args,))
         if not header:
             raise ConnectionError("Socket closed on remote end")
         self._unpack(header)
 
+
 def fdfs_pack_metadata(meta_dict):
-    ret = ''
+    ret = b''
     for key in meta_dict:
-        ret += '%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, \
-                             meta_dict[key], FDFS_RECORD_SEPERATOR)
+        ret += b'%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, meta_dict[key], FDFS_RECORD_SEPERATOR)
     return ret[0:-1]
 
+
 def fdfs_unpack_metadata(bytes_stream):
     li = bytes_stream.split(FDFS_RECORD_SEPERATOR)
-    return dict([item.split(FDFS_FIELD_SEPERATOR) for item in li])
-    
+    res = [item.split(FDFS_FIELD_SEPERATOR) for item in li]
+    return map(lambda l: map(lambda ll: ll.decode(), l), res)

+ 135 - 115
fdfs_client/fdfs_test.py

@@ -13,6 +13,7 @@ except ImportError:
     from fdfs_client.client import *
     from fdfs_client.exceptions import *
 
+
 def usage():
     s = 'Usage: python fdfs_test.py {options} [{local_filename} [{remote_file_id}]]\n'
     s += 'options: upfile, upbuffer, downfile, downbuffer, delete, listgroup, listserv\n'
@@ -33,15 +34,16 @@ def usage():
     s += '\tmodifyfile {local_filename} {remote_fileid} {file_offset}\n'
     s += '\tmodifybuffer {local_filename} {remote_fileid} {file_offset}\n'
     s += 'e.g.: python fdfs_test.py upfile test'
-    print s
+    print(s)
     sys.exit(0)
 
+
 if len(sys.argv) < 2:
     usage()
 
-
 client = Fdfs_client('client.conf')
 
+
 def upfile_func():
     # Upload by filename
     # usage: python fdfs_test.py upfile {local_filename}
@@ -53,17 +55,18 @@ def upfile_func():
         file_size = os.stat(local_filename).st_size
         # meta_buffer can be null.
         meta_dict = {
-            'ext_name' : 'py',
-            'file_size' : str(file_size) + 'B'
+            'ext_name': 'py',
+            'file_size': str(file_size) + 'B'
         }
         t1 = time.time()
         ret_dict = client.upload_by_filename(local_filename, meta_dict)
         t2 = time.time()
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-        print '[+] time consume: %fs' % (t2 - t1)
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+        print('[+] time consume: %fs' % (t2 - t1))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def upfileex_func():
     # Upload by file
@@ -77,10 +80,11 @@ def upfileex_func():
         ret_dict = client.upload_by_file(local_filename)
         t2 = time.time()
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-        print '[+] time consume: %fs' % (t2 - t1)
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+        print('[+] time consume: %fs' % (t2 - t1))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def upslavefile_func():
     # upload slave file
@@ -92,12 +96,13 @@ def upslavefile_func():
         local_filename = sys.argv[2]
         remote_fileid = sys.argv[3]
         prefix_name = sys.argv[4]
-        ret_dict = client.upload_slave_by_file(local_filename, remote_fileid, \
-                                                   prefix_name)
+        ret_dict = client.upload_slave_by_file(local_filename, remote_fileid,
+                                               prefix_name)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def upslavebuffer_func():
     # upload slave by buffer
@@ -111,14 +116,14 @@ def upslavebuffer_func():
         prefix_name = sys.argv[4]
         with open(local_filename, 'rb') as f:
             filebuffer = f.read()
-            ret_dict = client.upload_slave_by_buffer(local_filename, \
+            ret_dict = client.upload_slave_by_buffer(local_filename,
                                                      remote_fileid, prefix_name)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
-        
-        
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
+
 def del_func():
     # delete file
     # usage: python fdfs_test.py delete {remote_fileid}
@@ -128,11 +133,12 @@ def del_func():
     try:
         remote_file_id = sys.argv[2]
         ret_tuple = client.delete_file(remote_file_id)
-        print '[+] %s' % ret_tuple[0]
-        print '[+] remote_fileid: %s' % ret_tuple[1]
-        print '[+] Storage IP: %s' % ret_tuple[2]
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+        print('[+] %s' % ret_tuple[0])
+        print('[+] remote_fileid: %s' % ret_tuple[1])
+        print('[+] Storage IP: %s' % ret_tuple[2])
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def downfile_func():
     # Download to file
@@ -145,9 +151,10 @@ def downfile_func():
         remote_fileid = sys.argv[3]
         ret_dict = client.download_to_file(local_filename, remote_fileid)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def list_group_func():
     # List one group info
@@ -158,9 +165,10 @@ def list_group_func():
     try:
         group_name = sys.argv[2]
         ret = client.list_one_group(group_name)
-        print ret
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+        print(ret)
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def listall_func():
     # List all group info
@@ -170,14 +178,15 @@ def listall_func():
         return None
     try:
         ret_dict = client.list_all_groups()
-        print '=' * 80
-        print 'Groups count:', ret_dict['Groups count']
+        print('=' * 80)
+        print('Groups count:', ret_dict['Groups count'])
         for li in ret_dict['Groups']:
-            print '-' * 80
-            print li
-            print '-' * 80
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('-' * 80)
+            print(li)
+            print('-' * 80)
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def list_server_func():
     # List all servers info of group
@@ -192,18 +201,19 @@ def list_server_func():
         else:
             storage_ip = None
         ret_dict = client.list_servers(group_name, storage_ip)
-        print '=' * 80
-        print 'Group name: %s' % ret_dict['Group name']
-        print '=' * 80
+        print('=' * 80)
+        print('Group name: %s' % ret_dict['Group name'])
+        print('=' * 80)
         i = 1
         for serv in ret_dict['Servers']:
-            print 'Storage server %d:' % i
-            print '=' * 80
-            print serv
+            print('Storage server %d:' % i)
+            print('=' * 80)
+            print(serv)
             i += 1
-            print '=' * 80
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('=' * 80)
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def upbuffer_func():
     # Upload by buffer
@@ -218,18 +228,19 @@ def upbuffer_func():
         ext_name = None
     # meta_buffer can be null.
     meta_buffer = {
-        'ext_name' : 'gif',
-        'width'    : '150px',
-        'height'   : '80px'
+        'ext_name': 'gif',
+        'width': '150px',
+        'height': '80px'
     }
     try:
         with open(local_filename, 'rb') as f:
             file_buffer = f.read()
             ret_dict = client.upload_by_buffer(file_buffer, ext_name, meta_buffer)
             for key in ret_dict:
-                print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+                print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def downbuffer_func():
     # Download to buffer
@@ -241,10 +252,11 @@ def downbuffer_func():
     remote_fileid = sys.argv[2]
     try:
         ret_dict = client.download_to_buffer(remote_fileid)
-        print 'Downloaded content:'
-        print ret_dict['Content']
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+        print('Downloaded content:')
+        print(ret_dict['Content'])
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def get_meta_data_func():
     # Get meta data of remote file
@@ -255,9 +267,10 @@ def get_meta_data_func():
     remote_fileid = sys.argv[2]
     try:
         ret_dict = client.get_meta_data(remote_fileid)
-        print ret_dict
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+        print(ret_dict)
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def set_meta_data_func():
     # Set meta data of remote file
@@ -267,16 +280,17 @@ def set_meta_data_func():
         return None
     remote_fileid = sys.argv[2]
     meta_dict = {
-        'ext_name' : 'jgp',
-        'width'    : '160px',
-        'hight'    : '80px',
+        'ext_name': 'jgp',
+        'width': '160px',
+        'hight': '80px',
     }
     try:
         ret_dict = client.set_meta_data(remote_fileid, meta_dict)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def upappendfile_func():
     # Upload an appender file by filename
@@ -288,9 +302,10 @@ def upappendfile_func():
     try:
         ret_dict = client.upload_appender_by_file(local_filename)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def upappendbuffer_func():
     # Upload an appender file by buffer
@@ -304,9 +319,10 @@ def upappendbuffer_func():
             file_buffer = f.read()
             ret_dict = client.upload_appender_by_buffer(file_buffer)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def appendfile_func():
     # Append a remote file
@@ -319,9 +335,10 @@ def appendfile_func():
     try:
         ret_dict = client.append_by_file(local_filename, remote_fileid)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def appendbuffer_func():
     # Append a remote file by buffer
@@ -336,9 +353,10 @@ def appendbuffer_func():
             filebuffer = f.read()
             ret_dict = client.append_by_buffer(filebuffer, remote_fileid)
             for key in ret_dict:
-                print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+                print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def truncate_func():
     # Truncate file 
@@ -346,14 +364,15 @@ def truncate_func():
     if len(sys.argv) < 4:
         usage()
         return None
-    truncate_filesize = long(sys.argv[2])
+    truncate_filesize = sys.argv[2]
     remote_fileid = sys.argv[3]
     try:
         ret_dict = client.truncate_file(truncate_filesize, remote_fileid)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def modifyfile_func():
     # Modify file by filename
@@ -364,15 +383,16 @@ def modifyfile_func():
     local_filename = sys.argv[2]
     remote_fileid = sys.argv[3]
     if len(sys.argv) > 4:
-        file_offset = long(sys.argv[4])
+        file_offset = sys.argv[4]
     else:
         file_offset = 0
     try:
         ret_dict = client.modify_by_filename(local_filename, remote_fileid, file_offset)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
 
 def modifybuffer_func():
     # Modify file by buffer
@@ -383,7 +403,7 @@ def modifybuffer_func():
     local_filename = sys.argv[2]
     remote_fileid = sys.argv[3]
     if len(sys.argv) > 4:
-        file_offset = long(sys.argv[4])
+        file_offset = sys.argv[4]
     else:
         file_offset = 0
     try:
@@ -391,30 +411,30 @@ def modifybuffer_func():
             filebuffer = f.read()
         ret_dict = client.modify_by_buffer(filebuffer, remote_fileid, file_offset)
         for key in ret_dict:
-            print '[+] %s : %s' % (key, ret_dict[key])
-    except (ConnectionError, ResponseError, DataError), e:
-        print e
-    
-    
+            print('[+] %s : %s' % (key, ret_dict[key]))
+    except (ConnectionError, ResponseError, DataError) as e:
+        print(e)
+
+
 result = {
-    'upfile'     : lambda : upfile_func(),
-    'upfileex'   : lambda : upfileex_func(),
-    'upbuffer'   : lambda : upbuffer_func(),
-    'delete'     : lambda : del_func(),
-    'downfile'   : lambda : downfile_func(),
-    'downbuffer' : lambda : downbuffer_func(),
-    'listgroup'  : lambda : list_group_func(),
-    'listall'    : lambda : listall_func(),
-    'listsrv'    : lambda : list_server_func(),
-    'getmeta'    : lambda : get_meta_data_func(),
-    'setmeta'    : lambda : set_meta_data_func(),
-    'upslavefile' : lambda : upslavefile_func(),
-    'upappendfile' : lambda : upappendfile_func(),
-    'upappendbuffer' : lambda : upappendbuffer_func(),
-    'appendfile' : lambda : appendfile_func(),
-    'appendbuffer' : lambda : appendbuffer_func(),
-    'truncate'   : lambda : truncate_func(),
-    'modifyfile' : lambda : modifyfile_func(),
-    'modifybuffer' : lambda : modifybuffer_func(),
-    '-h'         : lambda : usage(),
+    'upfile': lambda: upfile_func(),
+    'upfileex': lambda: upfileex_func(),
+    'upbuffer': lambda: upbuffer_func(),
+    'delete': lambda: del_func(),
+    'downfile': lambda: downfile_func(),
+    'downbuffer': lambda: downbuffer_func(),
+    'listgroup': lambda: list_group_func(),
+    'listall': lambda: listall_func(),
+    'listsrv': lambda: list_server_func(),
+    'getmeta': lambda: get_meta_data_func(),
+    'setmeta': lambda: set_meta_data_func(),
+    'upslavefile': lambda: upslavefile_func(),
+    'upappendfile': lambda: upappendfile_func(),
+    'upappendbuffer': lambda: upappendbuffer_func(),
+    'appendfile': lambda: appendfile_func(),
+    'appendbuffer': lambda: appendbuffer_func(),
+    'truncate': lambda: truncate_func(),
+    'modifyfile': lambda: modifyfile_func(),
+    'modifybuffer': lambda: modifybuffer_func(),
+    '-h': lambda: usage(),
 }[sys.argv[1].lower()]()

+ 141 - 155
fdfs_client/storage_client.py

@@ -19,7 +19,8 @@ from fdfs_client.exceptions import (
 )
 from fdfs_client.utils import *
 
-def tcp_send_file(conn, filename, buffer_size = 1024):
+
+def tcp_send_file(conn, filename, buffer_size=1024):
     '''
     Send file to server, and split into multiple pkgs while sending.
     arguments:
@@ -34,17 +35,18 @@ def tcp_send_file(conn, filename, buffer_size = 1024):
             try:
                 send_buffer = f.read(buffer_size)
                 send_size = len(send_buffer)
-                if  send_size == 0:
+                if send_size == 0:
                     break
                 tcp_send_data(conn, send_buffer)
                 file_size += send_size
-            except ConnectionError, e:
+            except ConnectionError as e:
                 raise ConnectionError('[-] Error while uploading file(%s).' % e.args)
-            except IOError, e:
+            except IOError as e:
                 raise DataError('[-] Error while reading local file(%s).' % e.args)
     return file_size
 
-def tcp_send_file_ex(conn, filename, buffer_size = 4096):
+
+def tcp_send_file_ex(conn, filename, buffer_size=4096):
     '''
     Send file to server. Using linux system call 'sendfile'.
     arguments:
@@ -66,14 +68,14 @@ def tcp_send_file_ex(conn, filename, buffer_size = 4096):
                     break
                 nbytes += sent
                 offset += sent
-            except OSError, e:
+            except OSError as e:
                 if e.errno == errno.EAGAIN:
                     continue
                 raise
     return nbytes
-        
 
-def tcp_recv_file(conn, local_filename, file_size, buffer_size = 1024):
+
+def tcp_recv_file(conn, local_filename, file_size, buffer_size=1024):
     '''
     Receive file from server, fragmented it while receiving and write to disk.
     arguments:
@@ -102,22 +104,24 @@ def tcp_recv_file(conn, local_filename, file_size, buffer_size = 1024):
                 if flush_size >= 4096:
                     f.flush()
                     flush_size = 0
-            except ConnectionError, e:
+            except ConnectionError as e:
                 raise ConnectionError('[-] Error: while downloading file(%s).' % e.args)
-            except IOError, e:
+            except IOError as e:
                 raise DataError('[-] Error: while writting local file(%s).' % e.args)
     return total_file_size
-    
+
+
 class Storage_client(object):
-    '''
+    """
     The Class Storage_client for storage server.
     Note: argument host_tuple of storage server ip address, that should be a single element.
-    '''
+    """
+
     def __init__(self, *kwargs):
         conn_kwargs = {
-            'name'       : 'Storage Pool',
-            'host_tuple' : ((kwargs[0],kwargs[1]),),
-            'timeout'    : kwargs[2]
+            'name': 'Storage Pool',
+            'host_tuple': ((kwargs[0], kwargs[1]),),
+            'timeout': kwargs[2]
         }
         self.pool = ConnectionPool(**conn_kwargs)
         return None
@@ -129,7 +133,7 @@ class Storage_client(object):
         except:
             pass
 
-    def update_pool(self, old_store_serv, new_store_serv, timeout = 30):
+    def update_pool(self, old_store_serv, new_store_serv, timeout=30):
         '''
         Update connection pool of storage client.
         We need update connection pool of storage client, while storage server is changed.
@@ -139,20 +143,19 @@ class Storage_client(object):
             return None
         self.pool.destroy()
         conn_kwargs = {
-            'name'       : 'Storage_pool',
-            'host_tuple' : ((new_store_serv.ip_addr,new_store_serv.port),),
-            'timeout'    : timeout
+            'name': 'Storage_pool',
+            'host_tuple': ((new_store_serv.ip_addr, new_store_serv.port),),
+            'timeout': timeout
         }
         self.pool = ConnectionPool(**conn_kwargs)
         return True
-        
 
-    def _storage_do_upload_file(self, tracker_client, store_serv, \
-                               file_buffer, file_size = None, upload_type = None, \
-                               meta_dict = None, cmd = None, master_filename = None, \
-                               prefix_name = None, file_ext_name = None):
-        '''
+
+    def _storage_do_upload_file(self, tracker_client, store_serv, file_buffer, file_size=None, upload_type=None,
+                                meta_dict=None, cmd=None, master_filename=None, prefix_name=None, file_ext_name=None):
+        """
         core of upload file.
+        :rtype : object
         arguments:
         @tracker_client: Tracker_client, it is useful connect to tracker server
         @store_serv: Storage_server, it is return from query tracker server
@@ -165,7 +168,7 @@ class Storage_client(object):
         @master_filename: string, useful upload slave file
         @prefix_name: string
         @file_ext_name: string
-        @Return dictionary 
+        @Return dictionary
                  {
                      'Group name'      : group_name,
                      'Remote file_id'  : remote_file_id,
@@ -175,33 +178,32 @@ class Storage_client(object):
                      'Storage IP'      : storage_ip
                  }
 
-        '''
-        
+        """
+
+        print('getting connection')
         store_conn = self.pool.get_connection()
+        print(store_conn)
         th = Tracker_header()
+        print(th)
         master_filename_len = len(master_filename) if master_filename else 0
         prefix_name_len = len(prefix_name) if prefix_name else 0
         upload_slave = len(store_serv.group_name) and master_filename_len
         file_ext_name = str(file_ext_name) if file_ext_name else ''
-        #non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
+        # non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
         non_slave_fmt = '!B Q %ds' % FDFS_FILE_EXT_NAME_MAX_LEN
-        #slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
+        # slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
         #           -master_name(master_filename_len)-|
-        slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN, \
-                                          FDFS_FILE_EXT_NAME_MAX_LEN, \
-                                          master_filename_len)
+        slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN, FDFS_FILE_EXT_NAME_MAX_LEN, master_filename_len)
         th.pkg_len = struct.calcsize(slave_fmt) if upload_slave \
-                                                else struct.calcsize(non_slave_fmt)
+            else struct.calcsize(non_slave_fmt)
         th.pkg_len += file_size
         th.cmd = cmd
         th.send_header(store_conn)
         if upload_slave:
-            send_buffer = struct.pack(slave_fmt, master_filename_len, file_size, \
-                                       prefix_name, file_ext_name, \
-                                                  master_filename)
+            send_buffer = struct.pack(slave_fmt, master_filename_len, file_size, prefix_name, file_ext_name,
+                                      master_filename)
         else:
-            send_buffer = struct.pack(non_slave_fmt, store_serv.store_path_index, \
-                                                    file_size, file_ext_name)
+            send_buffer = struct.pack(non_slave_fmt, store_serv.store_path_index, file_size, file_ext_name.encode())
         try:
             tcp_send_data(store_conn, send_buffer)
             if upload_type == FDFS_UPLOAD_BY_FILENAME:
@@ -218,16 +220,15 @@ class Storage_client(object):
                 errmsg = '[-] Error: Storage response length is not match, '
                 errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
                 raise ResponseError(errmsg)
-            #recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
+            # recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
             recv_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, \
-                                 th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
+                                     th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
             (group_name, remote_name) = struct.unpack(recv_fmt, recv_buffer)
-            remote_filename = remote_name.strip('\x00')
+            remote_filename = remote_name.strip(b'\x00').decode()
             if meta_dict and len(meta_dict) > 0:
-                status = self.storage_set_metadata(tracker_client, store_serv, \
-                                        remote_filename, meta_dict)
-                if status != 0: 
-                    #rollback
+                status = self.storage_set_metadata(tracker_client, store_serv, remote_filename, meta_dict)
+                if status != 0:
+                    # rollback
                     self.storage_delete_file(tracker_client, store_serv, remote_filename)
                     raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
         except:
@@ -235,40 +236,40 @@ class Storage_client(object):
         finally:
             self.pool.release(store_conn)
         ret_dic = {
-            'Group name'      : group_name.strip('\x00'),
-            'Remote file_id'  : group_name.strip('\x00') + os.sep + \
-                                   remote_filename,
-            'Status'          : 'Upload successed.',
-            'Local file name' : file_buffer if (upload_type == FDFS_UPLOAD_BY_FILENAME \
-                                            or upload_type == FDFS_UPLOAD_BY_FILE) \
-                                            else '',
-            'Uploaded size'   : appromix(send_file_size) if (upload_type == \
-                                FDFS_UPLOAD_BY_FILENAME or upload_type == \
-                                FDFS_UPLOAD_BY_FILE) else appromix( len(file_buffer)),
-            'Storage IP'      : store_serv.ip_addr
+            'Group name': group_name.strip(b'\x00').decode(),
+            'Remote file_id': group_name.strip(b'\x00').decode() + os.sep + \
+                              remote_filename,
+            'Status': 'Upload successed.',
+            'Local file name': file_buffer if (
+                upload_type == FDFS_UPLOAD_BY_FILENAME or upload_type == FDFS_UPLOAD_BY_FILE) \
+                else '',
+            'Uploaded size': appromix(send_file_size) if (
+                upload_type == FDFS_UPLOAD_BY_FILENAME or upload_type == FDFS_UPLOAD_BY_FILE) else appromix(
+                len(file_buffer)),
+            'Storage IP': store_serv.ip_addr
         }
         return ret_dic
 
     def storage_upload_by_filename(self, tracker_client, store_serv, filename, \
-                                   meta_dict = None):
+                                   meta_dict=None):
         file_size = os.stat(filename).st_size
         file_ext_name = get_file_ext_name(filename)
-        return self._storage_do_upload_file(tracker_client, store_serv, filename, \
-                                            file_size, FDFS_UPLOAD_BY_FILENAME, meta_dict, \
-                                            STORAGE_PROTO_CMD_UPLOAD_FILE, None, \
+        return self._storage_do_upload_file(tracker_client, store_serv, filename,
+                                            file_size, FDFS_UPLOAD_BY_FILENAME, meta_dict,
+                                            STORAGE_PROTO_CMD_UPLOAD_FILE, None,
                                             None, file_ext_name)
 
-    def storage_upload_by_file(self, tracker_client, store_serv, filename, \
-                               meta_dict = None):
+    def storage_upload_by_file(self, tracker_client, store_serv, filename,
+                               meta_dict=None):
         file_size = os.stat(filename).st_size
         file_ext_name = get_file_ext_name(filename)
-        return self._storage_do_upload_file(tracker_client, store_serv, filename, \
-                                            file_size, FDFS_UPLOAD_BY_FILE, meta_dict, \
-                                            STORAGE_PROTO_CMD_UPLOAD_FILE, None, \
+        return self._storage_do_upload_file(tracker_client, store_serv, filename,
+                                            file_size, FDFS_UPLOAD_BY_FILE, meta_dict,
+                                            STORAGE_PROTO_CMD_UPLOAD_FILE, None,
                                             None, file_ext_name)
 
-    def storage_upload_by_buffer(self, tracker_client, store_serv, \
-                                 file_buffer, file_ext_name = None, meta_dict = None):
+    def storage_upload_by_buffer(self, tracker_client, store_serv,
+                                 file_buffer, file_ext_name=None, meta_dict=None):
         buffer_size = len(file_buffer)
         return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, \
                                             buffer_size, FDFS_UPLOAD_BY_BUFFER, meta_dict, \
@@ -277,7 +278,7 @@ class Storage_client(object):
 
     def storage_upload_slave_by_filename(self, tracker_client, store_serv, \
                                          filename, prefix_name, remote_filename, \
-                                         meta_dict = None):
+                                         meta_dict=None):
         file_size = os.stat(filename).st_size
         file_ext_name = get_file_ext_name(filename)
         return self._storage_do_upload_file(tracker_client, store_serv, filename, \
@@ -287,46 +288,44 @@ class Storage_client(object):
                                             file_ext_name)
 
     def storage_upload_slave_by_file(self, tracker_client, store_serv, \
-                                         filename, prefix_name, remote_filename, \
-                                         meta_dict = None):
+                                     filename, prefix_name, remote_filename, \
+                                     meta_dict=None):
         file_size = os.stat(filename).st_size
         file_ext_name = get_file_ext_name(filename)
-        return self._storage_do_upload_file(tracker_client, store_serv, filename, \
-                                            file_size, FDFS_UPLOAD_BY_FILE, meta_dict, \
-                                            STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, \
-                                            remote_filename, prefix_name, \
+        return self._storage_do_upload_file(tracker_client, store_serv, filename,
+                                            file_size, FDFS_UPLOAD_BY_FILE, meta_dict,
+                                            STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE,
+                                            remote_filename, prefix_name,
                                             file_ext_name)
 
-    def storage_upload_slave_by_buffer(self, tracker_client, store_serv, \
-                                       filebuffer, remote_filename, meta_dict, \
+    def storage_upload_slave_by_buffer(self, tracker_client, store_serv,
+                                       filebuffer, remote_filename, meta_dict,
                                        file_ext_name):
         file_size = len(filebuffer)
-        return self._storage_do_upload_file(tracker_client, store_serv, \
-                                            filebuffer, file_size, FDFS_UPLOAD_BY_BUFFER, \
-                                            meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, \
+        return self._storage_do_upload_file(tracker_client, store_serv,
+                                            filebuffer, file_size, FDFS_UPLOAD_BY_BUFFER,
+                                            meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE,
                                             None, remote_filename, file_ext_name)
 
-    def storage_upload_appender_by_filename(self, tracker_client, store_serv, \
-                                            filename, meta_dict = None):
+    def storage_upload_appender_by_filename(self, tracker_client, store_serv,
+                                            filename, meta_dict=None):
         file_size = os.stat(filename).st_size
         file_ext_name = get_file_ext_name(filename)
-        return self._storage_do_upload_file(tracker_client, store_serv, filename, \
-                                            file_size, FDFS_UPLOAD_BY_FILENAME, meta_dict, \
-                                            STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
+        return self._storage_do_upload_file(tracker_client, store_serv, filename,
+                                            file_size, FDFS_UPLOAD_BY_FILENAME, meta_dict,
+                                            STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE,
                                             None, None, file_ext_name)
 
-    def storage_upload_appender_by_file(self, tracker_client, store_serv, \
-                                            filename, meta_dict = None):
+    def storage_upload_appender_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
         file_size = os.stat(filename).st_size
         file_ext_name = get_file_ext_name(filename)
-        return self._storage_do_upload_file(tracker_client, store_serv, filename, \
-                                            file_size, FDFS_UPLOAD_BY_FILE, meta_dict, \
-                                            STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
-                                            None, None, file_ext_name)
+        return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
+                                            meta_dict,
+                                            STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None, file_ext_name)
 
-    def storage_upload_appender_by_buffer(self, tracker_client, store_serv, \
-                                          file_buffer, meta_dict = None, \
-                                          file_ext_name = None):
+    def storage_upload_appender_by_buffer(self, tracker_client, store_serv,
+                                          file_buffer, meta_dict=None,
+                                          file_ext_name=None):
         file_size = len(file_buffer)
         return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, \
                                             file_size, FDFS_UPLOAD_BY_BUFFER, meta_dict, \
@@ -344,17 +343,17 @@ class Storage_client(object):
         th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
         try:
             th.send_header(store_conn)
-            #del_fmt: |-group_name(16)-filename(len)-|
+            # del_fmt: |-group_name(16)-filename(len)-|
             del_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
-            send_buffer = struct.pack(del_fmt, store_serv.group_name, remote_filename)
+            send_buffer = struct.pack(del_fmt, store_serv.group_name.encode(), remote_filename.encode())
             tcp_send_data(store_conn, send_buffer)
             th.recv_header(store_conn)
-            #if th.status == 2:
+            # if th.status == 2:
             #    raise DataError('[-] Error: remote file %s is not exist.' \
             #                    % (store_serv.group_name + os.sep + remote_filename))
             if th.status != 0:
                 raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
-            #recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
+                # recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
         except:
             raise
         finally:
@@ -382,13 +381,13 @@ class Storage_client(object):
         th.cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE
         try:
             th.send_header(store_conn)
-            #down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
+            # down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
             down_fmt = '!Q Q %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
-            send_buffer = struct.pack(down_fmt, offset, download_size, \
-                                      store_serv.group_name, remote_filename)
+            send_buffer = struct.pack(down_fmt, offset, download_size, store_serv.group_name.encode(),
+                                      remote_filename.encode())
             tcp_send_data(store_conn, send_buffer)
             th.recv_header(store_conn)
-            #if th.status == 2:
+            # if th.status == 2:
             #    raise DataError('[-] Error: remote file %s is not exist.' % 
             #                    (store_serv.group_name + os.sep + remote_filename))
             if th.status != 0:
@@ -402,11 +401,11 @@ class Storage_client(object):
         finally:
             self.pool.release(store_conn)
         ret_dic = {
-            'Remote file_id' : store_serv.group_name + os.sep + remote_filename,
-            'Content' : file_buffer if download_type == \
-                                   FDFS_DOWNLOAD_TO_FILE else recv_buffer,
-            'Download size'   : appromix(total_recv_size),
-            'Storage IP'      : store_serv.ip_addr
+            'Remote file_id': store_serv.group_name + os.sep + remote_filename,
+            'Content': file_buffer if download_type == \
+                                      FDFS_DOWNLOAD_TO_FILE else recv_buffer,
+            'Download size': appromix(total_recv_size),
+            'Storage IP': store_serv.ip_addr
         }
         return ret_dic
 
@@ -416,15 +415,13 @@ class Storage_client(object):
                                               file_offset, download_bytes, \
                                               FDFS_DOWNLOAD_TO_FILE, remote_filename)
 
-    def storage_download_to_buffer(self, tracker_client, store_serv, file_buffer, \
-                                   file_offset, download_bytes, remote_filename):
-        return self._storage_do_download_file(tracker_client, store_serv, file_buffer, \
-                                              file_offset, download_bytes, \
+    def storage_download_to_buffer(self, tracker_client, store_serv, file_buffer, file_offset, download_bytes,
+                                   remote_filename):
+        return self._storage_do_download_file(tracker_client, store_serv, file_buffer, file_offset, download_bytes,
                                               FDFS_DOWNLOAD_TO_BUFFER, remote_filename)
 
-    def storage_set_metadata(self, tracker_client, store_serv, \
-                             remote_filename, meta_dict, \
-                             op_flag = STORAGE_SET_METADATA_FLAG_OVERWRITE):
+    def storage_set_metadata(self, tracker_client, store_serv, remote_filename, meta_dict,
+                             op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
         ret = 0
         conn = self.pool.get_connection()
         remote_filename_len = len(remote_filename)
@@ -432,20 +429,18 @@ class Storage_client(object):
         meta_len = len(meta_buffer)
         th = Tracker_header()
         th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + 1 + \
-                         FDFS_GROUP_NAME_MAX_LEN + remote_filename_len + meta_len
+                     FDFS_GROUP_NAME_MAX_LEN + remote_filename_len + meta_len
         th.cmd = STORAGE_PROTO_CMD_SET_METADATA
         try:
             th.send_header(conn)
-            #meta_fmt: |-filename_len(8)-meta_len(8)-op_flag(1)-group_name(16)
+            # meta_fmt: |-filename_len(8)-meta_len(8)-op_flag(1)-group_name(16)
             #           -filename(remote_filename_len)-meta(meta_len)|
-            meta_fmt = '!Q Q c %ds %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, \
-                                               remote_filename_len, meta_len)
-            send_buffer = struct.pack(meta_fmt, remote_filename_len, meta_len, \
-                                      op_flag, store_serv.group_name, \
-                                      remote_filename, meta_buffer)
+            meta_fmt = '!Q Q c %ds %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len, meta_len)
+            send_buffer = struct.pack(meta_fmt, remote_filename_len, meta_len, op_flag, store_serv.group_name.encode(),
+                                      remote_filename.encode(), meta_buffer)
             tcp_send_data(conn, send_buffer)
             th.recv_header(conn)
-            if th.status != 0 :
+            if th.status != 0:
                 ret = th.status
         except:
             raise
@@ -461,12 +456,12 @@ class Storage_client(object):
         th.cmd = STORAGE_PROTO_CMD_GET_METADATA
         try:
             th.send_header(store_conn)
-            #meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
+            # meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
             meta_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
-            send_buffer = struct.pack(meta_fmt, store_serv.group_name, remote_file_name)
+            send_buffer = struct.pack(meta_fmt, store_serv.group_name.encode(), remote_file_name.encode())
             tcp_send_data(store_conn, send_buffer)
             th.recv_header(store_conn)
-            #if th.status == 2:
+            # if th.status == 2:
             #    raise DataError('[-] Error: Remote file %s has no meta data.' \
             #                    % (store_serv.group_name + os.sep + remote_file_name))
             if th.status != 0:
@@ -490,11 +485,10 @@ class Storage_client(object):
         th.cmd = STORAGE_PROTO_CMD_APPEND_FILE
         try:
             th.send_header(store_conn)
-            #append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
+            # append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
             #             -filecontent(filesize)-|
             append_fmt = '!Q Q %ds' % appended_filename_len
-            send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, \
-                                      appended_filename)
+            send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, appended_filename.encode())
             tcp_send_data(store_conn, send_buffer)
             if upload_type == FDFS_UPLOAD_BY_FILENAME:
                 tcp_send_file(store_conn, file_buffer)
@@ -509,11 +503,9 @@ class Storage_client(object):
             raise
         finally:
             self.pool.release(store_conn)
-        ret_dict = {}
-        ret_dict['Status'] = 'Append file successed.'
-        ret_dict['Appender file name'] = store_serv.group_name + os.sep + appended_filename
-        ret_dict['Appended size'] = appromix(file_size)
-        ret_dict['Storage IP'] = store_serv.ip_addr
+        ret_dict = {'Status': 'Append file successed.',
+                    'Appender file name': store_serv.group_name + os.sep + appended_filename,
+                    'Appended size': appromix(file_size), 'Storage IP': store_serv.ip_addr}
         return ret_dict
 
     def storage_append_by_filename(self, tracker_client, store_serv, \
@@ -524,21 +516,21 @@ class Storage_client(object):
                                             FDFS_UPLOAD_BY_FILENAME, appended_filename)
 
     def storage_append_by_file(self, tracker_client, store_serv, \
-                                   local_filename, appended_filename):
+                               local_filename, appended_filename):
         file_size = os.stat(local_filename).st_size
         return self._storage_do_append_file(tracker_client, store_serv, \
                                             local_filename, file_size, \
                                             FDFS_UPLOAD_BY_FILE, appended_filename)
 
-    def storage_append_by_buffer(self, tracker_client, store_serv, \
+    def storage_append_by_buffer(self, tracker_client, store_serv,
                                  file_buffer, appended_filename):
         file_size = len(file_buffer)
-        return self._storage_do_append_file(tracker_client, store_serv, \
-                                            file_buffer, file_size, \
+        return self._storage_do_append_file(tracker_client, store_serv,
+                                            file_buffer, file_size,
                                             FDFS_UPLOAD_BY_BUFFER, appended_filename)
 
-    def _storage_do_truncate_file(self, tracker_client, store_serv, \
-                                 truncated_filesize, appender_filename):
+    def _storage_do_truncate_file(self, tracker_client, store_serv,
+                                  truncated_filesize, appender_filename):
         store_conn = self.pool.get_connection()
         th = Tracker_header()
         th.cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE
@@ -546,11 +538,11 @@ class Storage_client(object):
         th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
         try:
             th.send_header(store_conn)
-            #truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
+            # truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
             #              -appender_filename(len)-|
             truncate_fmt = '!Q Q %ds' % appender_filename_len
-            send_buffer = struct.pack(truncate_fmt, appender_filename_len, \
-                                      truncated_filesize, appender_filename)
+            send_buffer = struct.pack(truncate_fmt, appender_filename_len, truncated_filesize,
+                                      appender_filename.encode())
             tcp_send_data(store_conn, send_buffer)
             th.recv_header(store_conn)
             if th.status != 0:
@@ -570,7 +562,7 @@ class Storage_client(object):
                                               truncated_filesize, appender_filename)
 
     def _storage_do_modify_file(self, tracker_client, store_serv, upload_type, \
-                               filebuffer, offset, filesize, appender_filename):
+                                filebuffer, offset, filesize, appender_filename):
         store_conn = self.pool.get_connection()
         th = Tracker_header()
         th.cmd = STORAGE_PROTO_CMD_MODIFY_FILE
@@ -578,10 +570,9 @@ class Storage_client(object):
         th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 3 + appender_filename_len + filesize
         try:
             th.send_header(store_conn)
-            #modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
+            # modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
             modify_fmt = '!Q Q Q %ds' % appender_filename_len
-            send_buffer = struct.pack(modify_fmt, appender_filename_len, offset, \
-                                      filesize, appender_filename)
+            send_buffer = struct.pack(modify_fmt, appender_filename_len, offset, filesize, appender_filename.encode())
             tcp_send_data(store_conn, send_buffer)
             if upload_type == FDFS_UPLOAD_BY_FILENAME:
                 upload_size = tcp_send_file(store_conn, filebuffer)
@@ -601,16 +592,11 @@ class Storage_client(object):
         ret_dict['Storage IP'] = store_serv.ip_addr
         return ret_dict
 
-    def storage_modify_by_filename(self, tracker_client, store_serv, \
-                               filename, offset, \
-                               filesize, appender_filename):
-        return self._storage_do_modify_file(tracker_client, store_serv, \
-                                            FDFS_UPLOAD_BY_FILENAME, filename, offset, \
+    def storage_modify_by_filename(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
+        return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILENAME, filename, offset,
                                             filesize, appender_filename)
 
-    def storage_modify_by_file(self, tracker_client, store_serv, \
-                               filename, offset, \
-                               filesize, appender_filename):
+    def storage_modify_by_file(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
         return self._storage_do_modify_file(tracker_client, store_serv, \
                                             FDFS_UPLOAD_BY_FILE, filename, offset, \
                                             filesize, appender_filename)

+ 96 - 110
fdfs_client/tracker_client.py

@@ -16,23 +16,25 @@ from fdfs_client.exceptions import (
 )
 from fdfs_client.utils import *
 
+
 def parse_storage_status(status_code):
     try:
         ret = {
-            FDFS_STORAGE_STATUS_INIT : lambda       : 'INIT',
-            FDFS_STORAGE_STATUS_WAIT_SYNC : lambda  : 'WAIT_SYNC',
-            FDFS_STORAGE_STATUS_SYNCING : lambda    : 'SYNCING',
-            FDFS_STORAGE_STATUS_IP_CHANGED : lambda : 'IP_CHANGED',
-            FDFS_STORAGE_STATUS_DELETED : lambda    : 'DELETED',
-            FDFS_STORAGE_STATUS_OFFLINE : lambda    : 'OFFLINE',
-            FDFS_STORAGE_STATUS_ONLINE : lambda     : 'ONLINE',
-            FDFS_STORAGE_STATUS_ACTIVE : lambda     : 'ACTIVE',
-            FDFS_STORAGE_STATUS_RECOVERY : lambda   : 'RECOVERY'
+            FDFS_STORAGE_STATUS_INIT: lambda: 'INIT',
+            FDFS_STORAGE_STATUS_WAIT_SYNC: lambda: 'WAIT_SYNC',
+            FDFS_STORAGE_STATUS_SYNCING: lambda: 'SYNCING',
+            FDFS_STORAGE_STATUS_IP_CHANGED: lambda: 'IP_CHANGED',
+            FDFS_STORAGE_STATUS_DELETED: lambda: 'DELETED',
+            FDFS_STORAGE_STATUS_OFFLINE: lambda: 'OFFLINE',
+            FDFS_STORAGE_STATUS_ONLINE: lambda: 'ONLINE',
+            FDFS_STORAGE_STATUS_ACTIVE: lambda: 'ACTIVE',
+            FDFS_STORAGE_STATUS_RECOVERY: lambda: 'RECOVERY'
         }[status_code]()
     except KeyError:
         ret = 'UNKNOW'
     return ret
-    
+
+
 class Storage_info(object):
     def __init__(self):
         self.status = 0
@@ -94,65 +96,49 @@ class Storage_info(object):
         self.last_synced_time = datetime.fromtimestamp(0).isoformat()
         self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
         self.if_trunk_server = 0
-        #fmt = |-status(1)-ipaddr(16)-domain(128)-srcipaddr(16)-ver(6)-52*8-|
+        # fmt = |-status(1)-ipaddr(16)-domain(128)-srcipaddr(16)-ver(6)-52*8-|
         self.fmt = '!B %ds %ds %ds %ds %ds 52QB' % (FDFS_STORAGE_ID_MAX_SIZE, \
-                                               IP_ADDRESS_SIZE, \
-                                               FDFS_DOMAIN_NAME_MAX_LEN, \
-                                               IP_ADDRESS_SIZE, \
-                                           FDFS_VERSION_SIZE)
+                                                    IP_ADDRESS_SIZE, \
+                                                    FDFS_DOMAIN_NAME_MAX_LEN, \
+                                                    IP_ADDRESS_SIZE, \
+                                                    FDFS_VERSION_SIZE)
 
     def set_info(self, bytes_stream):
-        (self.status, id, ip_addr, domain_name, \
-         src_ip_addr, version, join_time,up_time, \
-         totalMB, freeMB, self.upload_prio, \
-         self.store_path_count, self.subdir_count_per_path, \
-         self.storage_port, self.storage_http_port, \
-         self.curr_write_path, \
-         self.total_upload_count, self.success_upload_count, \
-         self.total_append_count, self.success_append_count, \
-         self.total_modify_count, self.success_modify_count, \
-         self.total_truncate_count,self.success_truncate_count, \
-         self.total_setmeta_count, self.success_setmeta_count, \
-         self.total_del_count,    self.success_del_count, \
-         self.total_download_count,self.success_download_count, \
-         self.total_getmeta_count, self.success_getmeta_count, \
-         self.total_create_link_count, self.success_create_link_count, \
-         self.total_del_link_count, self.success_del_link_count, \
-         self.total_upload_bytes, self.success_upload_bytes, \
-         self.total_append_bytes, self.total_append_bytes, \
-         self.total_modify_bytes, self.success_modify_bytes, \
-         self.total_download_bytes, self.success_download_bytes, \
-         self.total_sync_in_bytes, self.success_sync_in_bytes, \
-         self.total_sync_out_bytes, self.success_sync_out_bytes, \
-         self.total_file_open_count, self.success_file_open_count, \
-         self.total_file_read_count, self.success_file_read_count, \
-         self.total_file_write_count, self.success_file_write_count, \
-         last_source_sync, last_sync_update, last_synced_time, \
-         last_heartbeat_time, self.if_trunk_server) \
-          = struct.unpack(self.fmt, bytes_stream)
+        (self.status, id, ip_addr, domain_name, src_ip_addr, version, join_time, up_time, totalMB, freeMB, self.upload_prio,
+         self.store_path_count, self.subdir_count_per_path, self.storage_port, self.storage_http_port, self.curr_write_path,
+         self.total_upload_count, self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count, self.success_modify_count,
+         self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count, self.success_setmeta_count,
+         self.total_del_count, self.success_del_count, self.total_download_count, self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
+         self.total_create_link_count, self.success_create_link_count, self.total_del_link_count, self.success_del_link_count,
+         self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes, self.total_append_bytes, self.total_modify_bytes, self.success_modify_bytes,
+         self.total_download_bytes, self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes,
+         self.total_sync_out_bytes, self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
+         self.total_file_read_count, self.success_file_read_count, self.total_file_write_count, self.success_file_write_count,
+         last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time, self.if_trunk_server) \
+            = struct.unpack(self.fmt, bytes_stream)
         try:
-            self.id = id.strip('\x00')
-            self.ip_addr = ip_addr.strip('\x00')
-            self.domain_name = domain_name.strip('\x00')
-            self.version = version.strip('\x00')
-            self.src_ip_addr = src_ip_addr.strip('\x00')
+            self.id = id.strip(b'\x00').decode()
+            self.ip_addr = ip_addr.strip(b'\x00').decode()
+            self.domain_name = domain_name.strip(b'\x00').decode()
+            self.version = version.strip(b'\x00').decode()
+            self.src_ip_addr = src_ip_addr.strip(b'\x00').decode()
             self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
             self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
-        except ValueError, e:
+        except ValueError as e:
             raise ResponseError('[-] Error: disk space overrun, can not represented it.')
         self.join_time = datetime.fromtimestamp(join_time).isoformat()
-        self.up_time   = datetime.fromtimestamp(up_time).isoformat()
+        self.up_time = datetime.fromtimestamp(up_time).isoformat()
         self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
         self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
         self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
         self.last_heartbeat_time = \
-           datetime.fromtimestamp(last_heartbeat_time).isoformat()
+            datetime.fromtimestamp(last_heartbeat_time).isoformat()
         return True
 
     def __str__(self):
-        '''Transform to readable string.'''
-        
-        s  = 'Storage information:\n'
+        """Transform to readable string."""
+
+        s = 'Storage information:\n'
         s += '\tid = %s\n' % (self.id)
         s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
         s += '\thttp domain = %s\n' % self.domain_name
@@ -216,21 +202,22 @@ class Storage_info(object):
     def get_fmt_size(self):
         return struct.calcsize(self.fmt)
 
+
 class Group_info(object):
     def __init__(self):
-        self.group_name            = ''
-        self.totalMB               = ''
-        self.freeMB                = ''
-        self.trunk_freeMB          = ''
-        self.count                 = 0
-        self.storage_port          = 0
-        self.store_http_port       = 0
-        self.active_count          = 0
-        self.curr_write_server     = 0
-        self.store_path_count      = 0
+        self.group_name = ''
+        self.totalMB = ''
+        self.freeMB = ''
+        self.trunk_freeMB = ''
+        self.count = 0
+        self.storage_port = 0
+        self.store_http_port = 0
+        self.active_count = 0
+        self.curr_write_server = 0
+        self.store_path_count = 0
         self.subdir_count_per_path = 0
-        self.curr_trunk_file_id    = 0
-        self.fmt                   = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
+        self.curr_trunk_file_id = 0
+        self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
         return None
 
     def __str__(self):
@@ -254,9 +241,9 @@ class Group_info(object):
         (group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, \
          self.store_http_port, self.active_count, self.curr_write_server, \
          self.store_path_count, self.subdir_count_per_path, self.curr_trunk_file_id) \
-        = struct.unpack(self.fmt, bytes_stream)
+            = struct.unpack(self.fmt, bytes_stream)
         try:
-            self.group_name = group_name.strip('\x00')
+            self.group_name = group_name.strip(b'\x00').decode()
             self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
             self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
             self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
@@ -265,16 +252,18 @@ class Group_info(object):
 
     def get_fmt_size(self):
         return struct.calcsize(self.fmt)
-    
+
+
 class Tracker_client(object):
-    '''Class Tracker client.'''
+    """Class Tracker client."""
+
     def __init__(self, pool):
         self.pool = pool
 
-    def tracker_list_servers(self, group_name, storage_ip = None):
-        '''
+    def tracker_list_servers(self, group_name, storage_ip=None):
+        """
         List servers in a storage group
-        '''
+        """
         conn = self.pool.get_connection()
         th = Tracker_header()
         ip_len = len(storage_ip) if storage_ip else 0
@@ -288,7 +277,7 @@ class Tracker_client(object):
         try:
             th.send_header(conn)
             send_buffer = struct.pack(group_fmt, group_name) + \
-                            struct.pack(storage_ip_fmt, store_ip_addr)
+                          struct.pack(storage_ip_fmt, store_ip_addr)
             tcp_send_data(conn, send_buffer)
             th.recv_header(conn)
             if th.status != 0:
@@ -299,7 +288,7 @@ class Tracker_client(object):
             recv_size = len(recv_buffer)
             if recv_size % si_fmt_size != 0:
                 errinfo = '[-] Error: response size not match, expect: %d, actual: %d' \
-                                   % (th.pkg_len, recv_size)
+                          % (th.pkg_len, recv_size)
                 raise ResponseError(errinfo)
         except ConnectionError:
             conn.disconnect()
@@ -310,7 +299,7 @@ class Tracker_client(object):
         si_list = []
         i = 0
         while num_storage:
-            si.set_info(recv_buffer[(i * si_fmt_size) : ((i + 1) * si_fmt_size)])
+            si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
             si_list.append(si)
             si = Storage_info()
             num_storage -= 1
@@ -325,7 +314,7 @@ class Tracker_client(object):
         th = Tracker_header()
         th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
         th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
-        #group_fmt: |-group_name(16)-|
+        # group_fmt: |-group_name(16)-|
         group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
         try:
             th.send_header(conn)
@@ -342,7 +331,7 @@ class Tracker_client(object):
             raise
         finally:
             self.pool.release(conn)
-        return group_info   
+        return group_info
 
     def tracker_list_all_groups(self):
         conn = self.pool.get_connection()
@@ -363,7 +352,7 @@ class Tracker_client(object):
         gi_fmt_size = gi.get_fmt_size()
         if recv_size % gi_fmt_size != 0:
             errmsg = '[-] Error: Response size is mismatch, except: %d, actul: %d' \
-                    % (th.pkg_len, recv_size)
+                     % (th.pkg_len, recv_size)
             raise ResponseError(errmsg)
         num_groups = recv_size / gi_fmt_size
         ret_dict = {}
@@ -371,17 +360,17 @@ class Tracker_client(object):
         gi_list = []
         i = 0
         while num_groups:
-            gi.set_info(recv_buffer[i * gi_fmt_size : (i + 1) * gi_fmt_size])
+            gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
             gi_list.append(gi)
             gi = Group_info()
             i += 1
             num_groups -= 1
         ret_dict['Groups'] = gi_list
         return ret_dict
-        
+
     def tracker_query_storage_stor_without_group(self):
-        '''Query storage server for upload, without group name.
-        Return: Storage_server object'''
+        """Query storage server for upload, without group name.
+        Return: Storage_server object"""
         conn = self.pool.get_connection()
         th = Tracker_header()
         th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
@@ -394,28 +383,28 @@ class Tracker_client(object):
             if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                 errmsg = '[-] Error: Tracker response length is invaild, '
                 errmsg += 'expect: %d, actual: %d' \
-                         % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
+                          % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
                 raise ResponseError(errmsg)
         except ConnectionError:
             conn.disconnect()
             raise
         finally:
             self.pool.release(conn)
-        #recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
+        # recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
         recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
         store_serv = Storage_server()
         (group_name, ip_addr, \
          store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
-        store_serv.group_name = group_name.strip('\x00')
-        store_serv.ip_addr = ip_addr.strip('\x00')
+        store_serv.group_name = group_name.strip(b'\x00').decode()
+        store_serv.ip_addr = ip_addr.strip(b'\x00').decode()
         return store_serv
 
     def tracker_query_storage_stor_with_group(self, group_name):
-        '''Query storage server for upload, based group name.
+        """Query storage server for upload, based group name.
         arguments:
         @group_name: string
         @Return Storage_server object
-        '''
+        """
         conn = self.pool.get_connection()
         th = Tracker_header()
         th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
@@ -432,40 +421,40 @@ class Tracker_client(object):
             if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                 errmsg = '[-] Error: Tracker response length is invaild, '
                 errmsg += 'expect: %d, actual: %d' \
-                            % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
+                          % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
                 raise ResponseError(errmsg)
         except ConnectionError:
             conn.disconnect()
             raise
         finally:
             self.pool.release(conn)
-        #recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
+        # recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
         recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
         store_serv = Storage_server()
         (group, ip_addr, \
          store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
-        store_serv.group_name = group.strip('\x00')
-        store_serv.ip_addr = ip_addr.strip('\x00')
+        store_serv.group_name = group.strip(b'\x00').decode()
+        store_serv.ip_addr = ip_addr.strip(b'\x00').decode()
         return store_serv
 
-    def _tracker_do_query_storage(self,group_name, filename, cmd):
-        '''
+    def _tracker_do_query_storage(self, group_name, filename, cmd):
+        """
         core of query storage, based group name and filename. 
         It is useful download, delete and set meta.
         arguments:
         @group_name: string
         @filename: string. remote file_id
         @Return: Storage_server object
-        '''
+        """
         conn = self.pool.get_connection()
         th = Tracker_header()
         file_name_len = len(filename)
         th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
         th.cmd = cmd
         th.send_header(conn)
-        #query_fmt: |-group_name(16)-filename(file_name_len)-|
+        # query_fmt: |-group_name(16)-filename(file_name_len)-|
         query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
-        send_buffer = struct.pack(query_fmt, group_name, filename)
+        send_buffer = struct.pack(query_fmt, group_name.encode(), filename.encode())
         try:
             tcp_send_data(conn, send_buffer)
             th.recv_header(conn)
@@ -481,25 +470,22 @@ class Tracker_client(object):
             raise
         finally:
             self.pool.release(conn)
-        #recv_fmt: |-group_name(16)-ip_addr(16)-port(8)-|
+        # recv_fmt: |-group_name(16)-ip_addr(16)-port(8)-|
         recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
         store_serv = Storage_server()
         (group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
-        store_serv.group_name = group_name.strip('\x00')
-        store_serv.ip_addr = ipaddr.strip('\x00')
+        store_serv.group_name = group_name.strip(b'\x00').decode()
+        store_serv.ip_addr = ipaddr.strip(b'\x00').decode()
         return store_serv
 
     def tracker_query_storage_update(self, group_name, filename):
-        '''
+        """
         Query storage server to update(delete and set_meta).
-        '''
-        return self._tracker_do_query_storage(group_name, filename, \
-                                              TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
+        """
+        return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
 
     def tracker_query_storage_fetch(self, group_name, filename):
-        '''
+        """
         Query storage server to download.
-        '''
-        return self._tracker_do_query_storage(group_name, filename, \
-                                              TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)
-    
+        """
+        return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)

+ 103 - 94
fdfs_client/utils.py

@@ -1,19 +1,24 @@
 #!/usr/bin/env python
 # -*- coding = utf-8 -*-
 # filename: utils.py
+from configparser import DEFAULTSECT, MissingSectionHeaderError, ParsingError, RawConfigParser, \
+    NoSectionError
+import os
+import stat
+from mutagen._compat import StringIO
 
-import os, sys, stat
-import ConfigParser 
-import StringIO 
+from requests.compat import basestring
 
-SUFFIX = ['B', 'KB', 'MB', 'GB','TB', 'PB', 'EB', 'ZB', 'YB']
-def appromix(size, base = 0):
-    '''Conver bytes stream size to human-readable format.
+SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
+
+
+def appromix(size, base=0):
+    """Conver bytes stream size to human-readable format.
     Keyword arguments:
     size: int, bytes stream size
     base: int, suffix index
     Return: string
-    '''
+    """
     multiples = 1024
     if size < 0:
         raise ValueError('[-] Error: number must be non-negative.')
@@ -21,11 +26,12 @@ def appromix(size, base = 0):
         return '{0:d}{1}'.format(size, SUFFIX[base])
     for suffix in SUFFIX[base:]:
         if size < multiples:
-            return '{0:.2f}{1}'.format(size,suffix)
-        size //=multiples
+            return '{0:.2f}{1}'.format(size, suffix)
+        size //= multiples
     raise ValueError('[-] Error: number too big.')
 
-def get_file_ext_name(filename, double_ext = True):
+
+def get_file_ext_name(filename, double_ext=True):
     li = filename.split(os.extsep)
     if len(li) <= 1:
         return ''
@@ -38,87 +44,88 @@ def get_file_ext_name(filename, double_ext = True):
                 return '%s.%s' % (li[-2], li[-1])
     return li[-1]
 
-class Fdfs_ConfigParser(ConfigParser.RawConfigParser): 
+
+class Fdfs_ConfigParser(RawConfigParser):
     """ 
     Extends ConfigParser to allow files without sections. 
  
     This is done by wrapping read files and prepending them with a placeholder 
     section, which defaults to '__config__' 
-    """ 
- 
-    def __init__(self, default_section=None, *args, **kwargs): 
-        ConfigParser.RawConfigParser.__init__(self, *args, **kwargs) 
- 
-        self._default_section = None 
-        self.set_default_section(default_section or '__config__') 
- 
-    def get_default_section(self): 
-        return self._default_section 
- 
-    def set_default_section(self, section): 
-        self.add_section(section) 
- 
+    """
+
+    def __init__(self, default_section=None, *args, **kwargs):
+        RawConfigParser.__init__(self, *args, **kwargs)
+
+        self._default_section = None
+        self.set_default_section(default_section or '__config__')
+
+    def get_default_section(self):
+        return self._default_section
+
+    def set_default_section(self, section):
+        self.add_section(section)
+
         # move all values from the previous default section to the new one 
-        try: 
-            default_section_items = self.items(self._default_section) 
-            self.remove_section(self._default_section) 
-        except ConfigParser.NoSectionError: 
-            pass 
-        else: 
-            for (key, value) in default_section_items: 
-                self.set(section, key, value) 
- 
-        self._default_section = section 
- 
-    def read(self, filenames): 
-        if isinstance(filenames, basestring): 
-            filenames = [filenames] 
- 
-        read_ok = [] 
-        for filename in filenames: 
-            try: 
-                with open(filename) as fp: 
-                    self.readfp(fp) 
-            except IOError: 
-                continue 
-            else: 
-                read_ok.append(filename) 
- 
-        return read_ok 
- 
-    def readfp(self, fp, *args, **kwargs): 
-        stream = StringIO.StringIO() 
- 
-        try: 
-            stream.name = fp.name 
-        except AttributeError: 
-            pass 
- 
-        stream.write('[' + self._default_section + ']\n') 
-        stream.write(fp.read()) 
-        stream.seek(0, 0) 
- 
-        return self._read(stream, stream.name) 
- 
-    def write(self, fp): 
+        try:
+            default_section_items = self.items(self._default_section)
+            self.remove_section(self._default_section)
+        except NoSectionError:
+            pass
+        else:
+            for (key, value) in default_section_items:
+                self.set(section, key, value)
+
+        self._default_section = section
+
+    def read(self, filenames):
+        if isinstance(filenames, basestring):
+            filenames = [filenames]
+
+        read_ok = []
+        for filename in filenames:
+            try:
+                with open(filename) as fp:
+                    self.readfp(fp)
+            except IOError as e:
+                continue
+            else:
+                read_ok.append(filename)
+
+        return read_ok
+
+    def readfp(self, fp, *args, **kwargs):
+        stream = StringIO()
+
+        try:
+            stream.name = fp.name
+        except AttributeError as e:
+            pass
+
+        stream.write('[' + self._default_section + ']\n')
+        stream.write(fp.read())
+        stream.seek(0, 0)
+
+        return self._read(stream, stream.name)
+
+    def write(self, fp):
         # Write the items from the default section manually and then remove them 
         # from the data. They'll be re-added later. 
-        try: 
-            default_section_items = self.items(self._default_section) 
-            self.remove_section(self._default_section) 
- 
-            for (key, value) in default_section_items: 
-                fp.write("{0} = {1}\n".format(key, value)) 
- 
-            fp.write("\n") 
-        except ConfigParser.NoSectionError: 
-            pass 
- 
-        ConfigParser.RawConfigParser.write(self, fp) 
- 
-        self.add_section(self._default_section) 
-        for (key, value) in default_section_items: 
-            self.set(self._default_section, key, value) 
+        try:
+            default_section_items = self.items(self._default_section)
+            self.remove_section(self._default_section)
+
+            for (key, value) in default_section_items:
+                fp.write("{0} = {1}\n".format(key, value))
+
+            fp.write("\n")
+        except NoSectionError:
+            pass
+
+        RawConfigParser.write(self, fp)
+
+        self.add_section(self._default_section)
+        for (key, value) in default_section_items:
+            self.set(self._default_section, key, value)
 
     def _read(self, fp, fpname):
         """Parse a sectioned setup file.
@@ -130,15 +137,15 @@ class Fdfs_ConfigParser(ConfigParser.RawConfigParser):
         leading whitespace.  Blank lines, lines beginning with a '#',
         and just about everything else are ignored.
         """
-        cursect = None                            # None, or a dictionary
+        cursect = None  # None, or a dictionary
         optname = None
         lineno = 0
-        e = None                                  # None, or an exception
+        e = None  # None, or an exception
         while True:
             line = fp.readline()
             if not line:
                 break
-            lineno = lineno + 1
+            lineno += 1
             # comment or blank line?
             if line.strip() == '' or line[0] in '#;':
                 continue
@@ -178,14 +185,14 @@ class Fdfs_ConfigParser(ConfigParser.RawConfigParser):
                             # ';' is a comment delimiter only if it follows
                             # a spacing character
                             pos = optval.find(';')
-                            if pos != -1 and optval[pos-1].isspace():
+                            if pos != -1 and optval[pos - 1].isspace():
                                 optval = optval[:pos]
                         optval = optval.strip()
                         # allow empty values
                         if optval == '""':
                             optval = ''
                         optname = self.optionxform(optname.rstrip())
-                        if cursect.has_key(optname):
+                        if cursect.get(optname):
                             if not isinstance(cursect[optname], list):
                                 cursect[optname] = [cursect[optname]]
                             cursect[optname].append(optval)
@@ -203,17 +210,19 @@ class Fdfs_ConfigParser(ConfigParser.RawConfigParser):
         if e:
             raise e
 
+
 def split_remote_fileid(remote_file_id):
-    '''
+    """
     Splite remote_file_id to (group_name, remote_file_name)
     arguments:
     @remote_file_id: string
     @return tuple, (group_name, remote_file_name)
-    '''
+    """
     index = remote_file_id.find('/')
     if -1 == index:
         return None
-    return (remote_file_id[0:index], remote_file_id[(index + 1):])
+    return remote_file_id[0:index], remote_file_id[(index + 1):]
+
 
 def fdfs_check_file(filename):
     ret = True
@@ -224,8 +233,8 @@ def fdfs_check_file(filename):
     elif not stat.S_ISREG(os.stat(filename).st_mode):
         ret = False
         errmsg = '[-] Error: %s is not a regular file.' % filename
-    return (ret, errmsg)
+    return ret, errmsg
+
 
 if __name__ == '__main__':
-    print get_file_ext_name('/bc.tar.gz')
-    
+    print(get_file_ext_name('/bc.tar.gz'))

+ 15 - 15
setup.py

@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 import os
 from fdfs_client import __version__
+
 try:
     from setuptools import setup, Extension
 except ImportError:
@@ -11,28 +12,27 @@ long_description = f.read()
 f.close()
 
 sdict = {
-    'name' : 'fdfs_client-py',
-    'version' : __version__,
-    'description' : 'Python client for Fastdfs ver 4.06',
-    'long_description' : long_description,
-    'author' : 'scott yuan',
-    'author_email' : 'scottzer8@gmail.com',
-    'maintainer' : 'scott yuan',
-    'maintainer_email' : 'scottzer8@gmail.com',
-    'keywords' : ['Fastdfs', 'Distribute File System'],
-    'license' : 'GPLV3',
-    'packages' : ['fdfs_client'],
-    'classifiers' : [
+    'name': 'fdfs_client-py',
+    'version': __version__,
+    'description': 'Python client for Fastdfs ver 4.06',
+    'long_description': long_description,
+    'author': 'scott yuan',
+    'author_email': 'scottzer8@gmail.com',
+    'maintainer': 'scott yuan',
+    'maintainer_email': 'scottzer8@gmail.com',
+    'keywords': ['Fastdfs', 'Distribute File System'],
+    'license': 'GPLV3',
+    'packages': ['fdfs_client'],
+    'classifiers': [
         'Development Status :: 1 - Production/Beta',
         'Environment :: Console',
         'Intended Audience :: Developers',
         'License :: GPLV3',
         'Operating System :: OS Independent',
         'Programming Language :: Python'],
-    'ext_modules' : [Extension('fdfs_client.sendfile',
-                             sources = ['fdfs_client/sendfilemodule.c'])],
+    'ext_modules': [Extension('fdfs_client.sendfile',
+                              sources=['fdfs_client/sendfilemodule.c'])],
 }
 
-
 setup(**sdict)