@unbracketed
Last active August 3, 2023 18:13

Revisions

  1. @ktilcu revised this gist Oct 14, 2013. 1 changed file with 5 additions and 3 deletions.

     8 changes: 5 additions & 3 deletions export_repo_issues_to_csv.py

     @@ -19,14 +19,16 @@ def write_issues(response):
          if not r.status_code == 200:
              raise Exception(r.status_code)
          for issue in r.json():
     -        print issue
     -        csvout.writerow([issue['number'], issue['title'].encode('utf-8'), issue['html_url']])
     +        labels = issue['labels']
     +        for label in labels:
     +            if label['name'] == "Client Requested":
     +                csvout.writerow([issue['number'], issue['title'].encode('utf-8'), issue['body'].encode('utf-8'), issue['created_at'], issue['updated_at']])


      r = requests.get(ISSUES_FOR_REPO_URL, auth=AUTH)
      csvfile = '%s-issues.csv' % (REPO.replace('/', '-'))
      csvout = csv.writer(open(csvfile, 'wb'))
     -csvout.writerow(('id', 'Title', 'URL'))
     +csvout.writerow(('id', 'Title', 'Body', 'Created At', 'Updated At'))
      write_issues(r)

      #more pages? examine the 'link' header returned

     (A standalone sketch of this label filter appears after the revision list.)
  2. Brian Luft revised this gist Apr 25, 2013. 1 changed file with 2 additions and 2 deletions.

     4 changes: 2 additions & 2 deletions export_repo_issues_to_csv.py

     @@ -11,7 +11,7 @@
      GITHUB_USER = ''
      GITHUB_PASSWORD = ''
      REPO = '' # format is username/repo
     -ISSUES_FOR_REPO_URL = 'https://api.github.com/repos/{}/issues'.format(REPO)
     +ISSUES_FOR_REPO_URL = 'https://api.github.com/repos/%s/issues' % REPO
      AUTH = (GITHUB_USER, GITHUB_PASSWORD)

      def write_issues(response):
     @@ -24,7 +24,7 @@ def write_issues(response):


      r = requests.get(ISSUES_FOR_REPO_URL, auth=AUTH)
     -csvfile = '{}-issues.csv'.format(REPO.replace('/', '-'))
     +csvfile = '%s-issues.csv' % (REPO.replace('/', '-'))
      csvout = csv.writer(open(csvfile, 'wb'))
      csvout.writerow(('id', 'Title', 'URL'))
      write_issues(r)
  3. Brian Luft revised this gist Apr 25, 2013. 1 changed file with 1 addition and 1 deletion.

     2 changes: 1 addition & 1 deletion export_repo_issues_to_csv.py

     @@ -18,7 +18,7 @@ def write_issues(response):
          "output a list of issues to csv"
          if not r.status_code == 200:
              raise Exception(r.status_code)
     -    for issue in r.json:
     +    for issue in r.json():
              print issue
              csvout.writerow([issue['number'], issue['title'].encode('utf-8'), issue['html_url']])

     (The note after the revision list explains the r.json to r.json() change.)
  4. Brian Luft created this gist Aug 17, 2012.

     42 changes: 42 additions & 0 deletions export_repo_issues_to_csv.py

     @@ -0,0 +1,42 @@
     """
     Exports Issues from a specified repository to a CSV file

     Uses basic authentication (Github username + password) to retrieve Issues
     from a repository that username has access to. Supports Github API v3.
     """
     import csv
     import requests


     GITHUB_USER = ''
     GITHUB_PASSWORD = ''
     REPO = '' # format is username/repo
     ISSUES_FOR_REPO_URL = 'https://api.github.com/repos/{}/issues'.format(REPO)
     AUTH = (GITHUB_USER, GITHUB_PASSWORD)


     def write_issues(response):
         "output a list of issues to csv"
         if not r.status_code == 200:
             raise Exception(r.status_code)
         for issue in r.json:
             print issue
             csvout.writerow([issue['number'], issue['title'].encode('utf-8'), issue['html_url']])


     r = requests.get(ISSUES_FOR_REPO_URL, auth=AUTH)
     csvfile = '{}-issues.csv'.format(REPO.replace('/', '-'))
     csvout = csv.writer(open(csvfile, 'wb'))
     csvout.writerow(('id', 'Title', 'URL'))
     write_issues(r)

     #more pages? examine the 'link' header returned
     if 'link' in r.headers:
         pages = dict(
             [(rel[6:-1], url[url.index('<')+1:-1]) for url, rel in
                 [link.split(';') for link in
                     r.headers['link'].split(',')]])
         while 'last' in pages and 'next' in pages:
             r = requests.get(pages['next'], auth=AUTH)
             write_issues(r)
             if pages['next'] == pages['last']:
                 break
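
Notes on the code

The switch from r.json to r.json() in the April 2013 revision tracks the requests 1.0 release, which turned Response.json from a property into a method. A minimal sketch of the modern call pattern (the repository named here is only an example, not part of the gist):

    import requests

    # any public repository works; octocat/Hello-World is just an illustration
    r = requests.get('https://api.github.com/repos/octocat/Hello-World/issues')
    r.raise_for_status()   # replaces the manual status_code check in the gist
    issues = r.json()      # a method on requests >= 1.0; bare r.json only worked on older releases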
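
The October 2013 revision narrows the export to issues carrying a "Client Requested" label and adds Body, Created At, and Updated At columns. A standalone sketch of that filter, with function and parameter names of my own choosing rather than the gist's:

    def client_requested_issues(issues, label_name="Client Requested"):
        "yield only the issues that carry the given label"
        for issue in issues:
            if any(label['name'] == label_name for label in issue.get('labels', [])):
                yield issue

    # usage: for issue in client_requested_issues(r.json()): ...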
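
Two quirks of the original script are worth noting: write_issues ignores its response parameter and reads the module-level r instead, and the pagination loop builds pages once from the first response, so it keeps re-requesting the same second page instead of advancing. The script is also Python 2 (print statement, 'wb' mode for the CSV file). The following is a rough Python 3 sketch that folds the later revisions in; it assumes a personal access token in place of username/password (GitHub has since dropped password authentication for its API) and uses requests' parsed r.links mapping instead of splitting the raw link header by hand. Names such as GITHUB_TOKEN are placeholders, not part of the original gist.

    """Export issues from a GitHub repository to a CSV file (modernized sketch)."""
    import csv
    import requests

    GITHUB_TOKEN = ''   # personal access token (placeholder)
    REPO = ''           # format is username/repo

    ISSUES_URL = 'https://api.github.com/repos/%s/issues' % REPO
    HEADERS = {'Authorization': 'token %s' % GITHUB_TOKEN,
               'Accept': 'application/vnd.github+json'}


    def write_issues(response, writer):
        "write one page of issues to the csv writer"
        response.raise_for_status()
        for issue in response.json():
            writer.writerow([issue['number'], issue['title'], issue['html_url'],
                             issue['created_at'], issue['updated_at']])


    csvfile = '%s-issues.csv' % REPO.replace('/', '-')
    with open(csvfile, 'w', newline='', encoding='utf-8') as fh:
        writer = csv.writer(fh)
        writer.writerow(('id', 'Title', 'URL', 'Created At', 'Updated At'))

        r = requests.get(ISSUES_URL, headers=HEADERS)
        write_issues(r, writer)

        # requests parses the link header into r.links; follow 'next' until it disappears
        while 'next' in r.links:
            r = requests.get(r.links['next']['url'], headers=HEADERS)
            write_issues(r, writer)

The label filter sketched above can be dropped into the loop inside write_issues if only "Client Requested" issues are wanted.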