#!/usr/bin/env python3

# Use this to download all of your Snap memories once you have received the
# package containing all of your data. Working as of January 2021.
# See https://accounts.snapchat.com/accounts/downloadmydata

import hashlib
import os
import re
import sys

import requests


def download_memory(url, out_dir):
    """
    Takes a URL of the form https://app.snapchat.com/dmd/memories?uid=,
    downloads the asset, and writes it to disk.
    """
    # Exchange the Snap URL for a CDN URL
    resp = requests.post(url, headers={'Content-Type': 'application/x-www-form-urlencoded'})
    if resp.status_code != 200:
        print(f'[!] Error retrieving CDN url from {url}: got {resp.status_code}')
        return
    cdn_url = resp.text

    # Retrieve the CDN URL contents
    with requests.get(cdn_url) as resp:
        # Warning: this reads the whole asset into memory
        if resp.status_code != 200:
            print(f'[!] Error retrieving CDN contents for {cdn_url}: got {resp.status_code}')
            return
        data = resp.content

    # Name the file by content hash so duplicate memories are detected
    digest = hashlib.md5(data).hexdigest()
    ext = 'jpg' if '.jpg' in cdn_url else 'mp4'
    path = f'{out_dir}/{digest}.{ext}'
    if os.path.exists(path):
        print(f'[!] Skipping duplicate file {path}')
        return

    # Write to disk
    with open(path, 'wb') as f:
        f.write(data)
    print(f'[*] Downloaded {path}')


def download(memories_file, out_dir):
    """
    Extracts memory URLs from the HTML file included in the
    "Download Your Information" package and downloads each one.
    """
    os.makedirs(out_dir, exist_ok=True)
    with open(memories_file) as f:
        html = f.read()

    pattern = re.compile(r"downloadMemories\('(.*?)'\)")
    urls = pattern.findall(html)
    print(f'[*] Found {len(urls)} memories')

    for i, url in enumerate(urls):
        print(f'[*] Progress: {i * 100 / len(urls):.2f}%')
        download_memory(url, out_dir)
    print('[*] Done.')


if __name__ == '__main__':
    if len(sys.argv) < 3:
        print(f'[!] Usage: {sys.argv[0]} <memories_file> <out_dir>')
        sys.exit(1)
    download(sys.argv[1], sys.argv[2])