Added destroy functions, empty trash, updated tests, other small refactoring

richard@richard.do 2013-02-23 15:06:02 +00:00
parent 27b636aa26
commit c77ec993e9
4 changed files with 177 additions and 114 deletions

View file

@@ -52,11 +52,14 @@ This is a work in progress, further functionality coming shortly.
file = m.find('myfile.doc')
m.get_link(file)
### Trash a file from URL, its ID, or from search
### Trash or destroy a file from URL, its ID, or from search
m.delete('utYjgSTQ')
m.delete_url('https://mega.co.nz/#!utYjgSTQ!OM4U3V5v_W4N5edSo0wolg1D5H0fwSrLD3oLnLuS9pc')
m.destroy('utYjgSTQ')
m.destroy_url('https://mega.co.nz/#!utYjgSTQ!OM4U3V5v_W4N5edSo0wolg1D5H0fwSrLD3oLnLuS9pc')
files = m.find('myfile.doc')
if files:
m.delete(files[1]['k'])
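This commit also adds an empty_trash() helper (visible in the mega.py diff below); a minimal usage sketch, assuming the same logged-in m instance as in the snippet above:
### Empty the Rubbish Bin (permanently destroys everything in it)
m.empty_trash()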

View file

@@ -73,10 +73,15 @@ class Mega(object):
if self.sid:
params.update({'sid': self.sid})
#ensure input data is a list
if not isinstance(data, list):
data = [data]
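#a list payload lets callers batch several commands into one POST (empty_trash relies on this)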
req = requests.post(
'{0}://g.api.{1}/cs'.format(self.schema, self.domain),
params=params,
data=json.dumps([data]),
data=json.dumps(data),
timeout=self.timeout)
json_resp = json.loads(req.text)
@@ -85,6 +90,61 @@ class Mega(object):
raise RequestError(json_resp)
return json_resp[0]
def parse_url(self, url):
#parse file id and key from url
if ('!' in url):
match = re.findall(r'/#!(.*)', url)
path = match[0]
return path
else:
raise RequestError('Url key missing')
def process_file(self, file):
"""
Process a file...
"""
if file['t'] == 0 or file['t'] == 1:
key = file['k'][file['k'].index(':') + 1:]
#fix for shared folder key format {k: foo1:bar1/foo2:bar2 }
uid = file['u']
keys = file['k'].split('/')
regex = re.compile('^%s:.*$' % uid)
for keytmp in keys:
if regex.match(keytmp):
key = keytmp[keytmp.index(':') + 1:]
key = decrypt_key(base64_to_a32(key), self.master_key)
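#for plain files (t == 0) the decrypted key holds 8 words: XOR-folding the halves gives the 16-byte AES key, words 4-5 the CTR IV and words 6-7 the meta-MAC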
if file['t'] == 0:
k = (key[0] ^ key[4], key[1] ^ key[5], key[2] ^ key[6],
key[3] ^ key[7])
file['iv'] = key[4:6] + (0, 0)
file['meta_mac'] = key[6:8]
else:
k = file['k'] = key
attributes = base64_url_decode(file['a'])
attributes = decrypt_attr(attributes, k)
file['a'] = attributes
elif file['t'] == 2:
self.root_id = file['h']
file['a'] = {'n': 'Cloud Drive'}
elif file['t'] == 3:
self.inbox_id = file['h']
file['a'] = {'n': 'Inbox'}
elif file['t'] == 4:
self.trashbin_id = file['h']
file['a'] = {'n': 'Rubbish Bin'}
return file
def find(self, filename):
'''
Return file object from given filename
'''
files = self.get_files()
for file in files.items():
if file[1]['a'] and file[1]['a']['n'] == filename:
return file
##########################################################################
# GET
def get_files(self):
'''
Get all files in account
@@ -134,53 +194,71 @@ class Mega(object):
else:
raise ValidationError('File id and key must be present')
def download_url(self, url, dest_path=None):
'''
Download a file by its public URL
'''
path = self.parse_url(url).split('!')
file_id = path[0]
file_key = path[1]
self.download_file(file_id, file_key, dest_path, is_public=True)
def download(self, file, dest_path=None):
'''
Download a file by its file object
'''
url = self.get_link(file)
self.download_url(url, dest_path)
def parse_url(self, url):
#parse file id and key from url
if ('!' in url):
match = re.findall(r'/#!(.*)', url)
path = match[0]
return path
else:
raise RequestError('Url key missing')
def get_user(self):
user_data = self.api_request({'a': 'ug'})
return user_data
def get_node_by_type(self, type):
'''
Get a node by its numeric type id, e.g.:
0: file
1: dir
2: special: root cloud drive
3: special: inbox
4: special: trash bin
'''
nodes = self.get_files()
for node in nodes.items():
if (node[1]['t'] == type):
return node
def get_files_in_node(self, target):
'''
Get all files in a given target, e.g. 4=trash
'''
node_id = self.get_node_by_type(target)
files = self.api_request({'a': 'f', 'c': 1})
files_dict = {}
for file in files['f']:
processed_file = self.process_file(file)
if processed_file['a'] and processed_file['p'] == node_id[0]:
files_dict[file['h']] = processed_file
return files_dict
def get_id_from_public_handle(self, public_handle):
#get node data
node_data = self.api_request({'a': 'f', 'f': 1, 'p': public_handle})
node_id = None
#determine node id
for i in node_data['f']:
if i['h'] != u'':
node_id = i['h']
return node_id
##########################################################################
# DELETE
def delete(self, public_handle):
#move a file to the Rubbish Bin (trash) by its id
return self.move(public_handle, 4)
def delete_url(self, url):
#trash a file via its URL
path = self.parse_url(url).split('!')
public_handle = path[0]
return self.move(public_handle, 4)
def delete(self, public_handle):
#move a file to the Rubbish Bin (trash) by its id
return self.move(public_handle, 4)
def find(self, filename):
'''
Return file object from given filename
'''
files = self.get_files()
for file in files.items():
if file[1]['a'] and file[1]['a']['n'] == filename:
return file
def destroy(self, file_id):
#delete forever by private id
return self.api_request({'a': 'd',
'n': file_id,
'i': self.request_id})
def destroy_url(self, url):
#permanently delete a file via its URL
path = self.parse_url(url).split('!')
public_handle = path[0]
file_id = self.get_id_from_public_handle(public_handle)
return self.destroy(file_id)
def move(self, public_handle, target):
#TODO node_id improvements
@@ -210,20 +288,37 @@ class Mega(object):
return self.api_request({'a': 'm', 'n': node_id, 't': target_node_id,
'i': self.request_id})
def get_node_by_type(self, type):
'''
Get a node by its numeric type id, e.g.:
0: file
1: dir
2: special: root cloud drive
3: special: inbox
4: special: trash bin
'''
nodes = self.get_files()
for node in nodes.items():
if (node[1]['t'] == type):
return node
def empty_trash(self):
# get list of files in the Rubbish Bin
files = self.get_files_in_node(4)
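#note: if the bin is already empty nothing is posted and None is returned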
# build a batched list of delete commands
if files != {}:
post_list = []
for file in files:
post_list.append({"a": "d",
"n": file,
"i": self.request_id})
return self.api_request(post_list)
##########################################################################
# DOWNLOAD
def download(self, file, dest_path=None):
'''
Download a file by its file object
'''
url = self.get_link(file)
self.download_url(url, dest_path)
def download_url(self, url, dest_path=None):
'''
Download a file by its public URL
'''
path = self.parse_url(url).split('!')
file_id = path[0]
file_key = path[1]
self.download_file(file_id, file_key, dest_path, is_public=True)
def download_file(self, file_handle, file_key, dest_path=None, is_public=False):
if is_public:
@@ -243,9 +338,9 @@ class Mega(object):
attribs = decrypt_attr(attribs, k)
file_name = attribs['n']
print "downloading {0} (size: {1}), url = {2}".format(attribs['n'].encode("utf8"),
print("downloading {0} (size: {1}), url = {2}".format(attribs['n'].encode("utf8"),
file_size,
file_url)
file_url))
input_file = requests.get(file_url, stream=True).raw
@@ -289,6 +384,8 @@ class Mega(object):
if (file_mac[0] ^ file_mac[1], file_mac[2] ^ file_mac[3]) != meta_mac:
raise ValueError('Mismatched mac')
##########################################################################
# UPLOAD
def upload(self, filename, dest=None):
#determine storage node
if dest is None:
@@ -355,38 +452,3 @@ class Mega(object):
#close input file and return API msg
input_file.close()
return data
def process_file(self, file):
"""
Process a file...
"""
if file['t'] == 0 or file['t'] == 1:
key = file['k'][file['k'].index(':') + 1:]
#fix for shared folder key format {k: foo1:bar1/foo2:bar2 }
uid = file['u']
keys = file['k'].split('/')
regex = re.compile('^%s:.*$' % uid)
for keytmp in keys:
if regex.match(keytmp):
key = keytmp[keytmp.index(':') + 1:]
key = decrypt_key(base64_to_a32(key), self.master_key)
if file['t'] == 0:
k = (key[0] ^ key[4], key[1] ^ key[5], key[2] ^ key[6],
key[3] ^ key[7])
file['iv'] = key[4:6] + (0, 0)
file['meta_mac'] = key[6:8]
else:
k = file['k'] = key
attributes = base64_url_decode(file['a'])
attributes = decrypt_attr(attributes, k)
file['a'] = attributes
elif file['t'] == 2:
self.root_id = file['h']
file['a'] = {'n': 'Cloud Drive'}
elif file['t'] == 3:
self.inbox_id = file['h']
file['a'] = {'n': 'Inbox'}
elif file['t'] == 4:
self.trashbin_id = file['h']
file['a'] = {'n': 'Rubbish Bin'}
return file
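For reference, a minimal sketch of the batched payload that the new empty_trash() sends through the now list-aware api_request(); the request id and node handles below are hypothetical placeholders, not values taken from this commit:
import json
request_id = 'xGk2'                        # hypothetical request id
trash_handles = ['aBcDeFgH', 'iJkLmNoP']   # hypothetical handles from get_files_in_node(4)
# one MEGA delete command ('a': 'd') per node in the Rubbish Bin; api_request()
# now posts the list as-is instead of wrapping a single dict itself
post_list = [{'a': 'd', 'n': h, 'i': request_id} for h in trash_handles]
print(json.dumps(post_list))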

View file

@@ -30,7 +30,7 @@ def get_package_data(package):
setup(
name='mega.py',
version='0.8.1',
version='0.8.2',
packages=get_packages('mega'),
package_data=get_package_data('mega'),
description='Python lib for the Mega.co.nz API',

View file

@@ -7,45 +7,43 @@ def test():
mega = Mega()
##login
#login
m = mega.login(email, password)
##get user details
#get user details
details = m.get_user()
print(details)
##get account files
#get account files
files = m.get_files()
#example iterate over files
for file in files:
print files[file]
##upload file
#upload file
print(m.upload('tests.py'))
##get file's public link
#NOTE: if passing upload() function response use get_upload_link()
#search for a file in account
file = m.find('tests.py')
#print(m.get_upload_link(file))
print(m.get_link(file))
##trash a file, by id or url
#print(m.delete('f14U0JhD'))
#print(m.delete_url('https://mega.co.nz/#!f14U0JhD!S_2k-EvB5U1N3s0vm3I5C0JN2toHSGkVf0UxQsiKZ8A'))
##search for a file in account
file = m.find('somefile.doc')
if file:
#trash a file by its id
#get public link
link = m.get_link(file)
print(link)
#download file. by file object or url
m.download(file, '/tmp')
#m.download_url(link)
#delete or destroy file. by id or url
print(m.delete(file[1]['k']))
#print(m.destroy(file[1]['h']))
#print(m.delete_url(link))
#print(m.destroy_url(link))
##download file
#file = m.find('tests.py')
#m.download(file)
##specify destination folder
#m.download(file, '/home/user_name/Desktop')
#m.download_url('https://mega.co.nz/#!6hBW0R4a!By7-Vjj5xal8K5w_IXH3PlGNyZ1VvIrjZkOmHGq1X00')
#empty trash
print(m.empty_trash())
if __name__ == '__main__':
test()