Extract Project Information using Python and Atlassian API
Platform Notice: Cloud Only - This article only applies to Atlassian products on the cloud platform.
Summary
Disclaimer
Atlassian does not support the code below, which is provided "AS IS". The goal of this article is to provide a piece of code that illustrates one way to achieve the desired goal.
Feedback provided at the bottom of the page is appreciated, but won't be handled as support.
The Python script on this page retrieves specific project information:
Project ID
Project Name
Project Key
Project Lead
Workflow scheme associated with the project
Total Issue Count per project
Last Issue Update per project
Issue Types per project
Solution
Environment
Jira Cloud
This script requires Python 3 to be installed
The script uses the Jira Cloud Platform API
Usage
The Python script requires an API_TOKEN: Manage API tokens for your Atlassian account
User with Administrative Access to the instance: Permissions
For large instances, the process can take a while, since there is an execution delay of 1 second between the calls, to avoid timeout/Too Many Requests errors
Here is the script - Adjust your User Name and API TOKEN accordingly
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import requests
from requests.auth import HTTPBasicAuth
import csv
import json
import time # Import time module to use for adding delays
# Replace with your Jira credentials and URL
JIRA_URL = "https://<instance>.atlassian.net"
# Project search endpoint (supports pagination + expand parameters).
API_PROJECTS_ENDPOINT = "/rest/api/3/project/search"
# Returns the workflow scheme associated with a given projectId.
API_WORKFLOW_ENDPOINT = "/rest/api/3/workflowscheme/project"
USERNAME = "<email_address>"
API_TOKEN = "<API_TOKEN>"
# Delay time in seconds (customize as needed)
DELAY_TIME = 1 # 1-second delay between paginated API calls to avoid rate limiting
# Jira API headers
headers = {
"Accept": "application/json"
}
# Look up the workflow scheme assigned to a project by its ID.
def get_workflow_for_project(project_id):
    """Return the name of the workflow scheme assigned to *project_id*.

    Falls back to "N/A" when the request fails or when no workflow
    scheme is associated with the project.
    """
    endpoint = f"{JIRA_URL}{API_WORKFLOW_ENDPOINT}?projectId={project_id}"
    resp = requests.get(endpoint, headers=headers, auth=HTTPBasicAuth(USERNAME, API_TOKEN))
    if resp.status_code != 200:
        print(f"Failed to fetch workflow for project ID {project_id}. Status code: {resp.status_code}")
        return "N/A"
    payload = resp.json()
    # The endpoint returns a paginated "values" list; an empty or missing
    # list means no scheme is associated.
    entries = payload.get("values") or []
    if not entries:
        return "N/A"
    return entries[0].get("workflowScheme", {}).get("name", "N/A")
# Function to extract issue types for a project
def get_issue_types_for_project(project):
    """Return a comma-separated string of the project's issue-type names.

    Args:
        project: A project dict from the Jira project search API; may
            contain an "issueTypes" list of dicts with a "name" key.

    Returns:
        "Name1, Name2, ..." for the named issue types, or "N/A" when the
        project has none.
    """
    issue_types = project.get('issueTypes', [])
    # Skip entries without a name: joining a None would raise TypeError.
    issue_type_names = [issue_type.get('name') for issue_type in issue_types if issue_type.get('name')]
    return ", ".join(issue_type_names) if issue_type_names else "N/A"
# Function to get paginated projects
def get_paginated_projects():
    """Fetch every project from the Jira project search API, page by page.

    Each returned dict carries the project key/ID/name/lead, the
    "insight" issue counts, the assigned workflow scheme name and the
    project's issue types. A DELAY_TIME pause is inserted between page
    requests to stay clear of rate limits.

    Returns:
        list[dict]: One dict per project, ready for CSV export.
    """
    start_at = 0
    max_results = 50  # Page size; adjust as needed.
    all_projects = []
    total_projects = None  # Unknown until the first response arrives.
    while True:
        # Set the API URL with pagination and expand parameters
        url = f"{JIRA_URL}{API_PROJECTS_ENDPOINT}?expand=lead,insight,issueTypes&startAt={start_at}&maxResults={max_results}"
        # Send a GET request to the Jira API
        response = requests.get(url, headers=headers, auth=HTTPBasicAuth(USERNAME, API_TOKEN))
        if response.status_code != 200:
            print(f"Failed to fetch projects. Status code: {response.status_code}, Response: {response.text}")
            break
        # Parse the response JSON; use .get so an unexpected payload shape
        # does not raise KeyError mid-run.
        data = response.json()
        # Record the grand total once, from the first page.
        if total_projects is None:
            total_projects = data.get('total', 0)
            print(f"Total number of projects to fetch: {total_projects}")
        # Extract the required fields and fetch workflow scheme and issue types
        page = data.get('values', [])
        for project in page:
            project_id = project.get('id')  # Get the project ID
            workflow_name = get_workflow_for_project(project_id)  # Workflow scheme for this project
            insight = project.get('insight', {})
            issue_types = get_issue_types_for_project(project)  # Issue types for this project
            project_info = {
                "Project Key": project.get('key'),
                "Project ID": project_id,
                "Project Name": project.get('name'),
                "Project Lead": project.get('lead', {}).get('displayName', 'N/A'),
                "totalIssueCount": insight.get('totalIssueCount', 'N/A'),
                "lastIssueUpdateTime": insight.get('lastIssueUpdateTime', 'N/A'),
                "Assigned Workflow": workflow_name,  # Workflow scheme name
                "issueTypes": issue_types  # Comma-separated issue type names
            }
            all_projects.append(project_info)
        # Print progress after each API call
        print(f"Fetched {len(all_projects)} projects out of {total_projects} so far.")
        # A short (or empty) page means we've reached the last one.
        if len(page) < max_results:
            break
        # Increment the starting index for pagination
        start_at += max_results
        # Add delay between consecutive API requests
        time.sleep(DELAY_TIME)
    return all_projects
# Function to export data to CSV
def export_to_csv(projects, file_name="projects_with_lead_insight_workflow_issuetypes.csv"):
    """Write the collected project dicts to a CSV file.

    Args:
        projects: List of dicts whose keys match the column names below.
        file_name: Destination CSV path.
    """
    # Column order for the CSV. Named `fieldnames` (not `headers`) so it
    # does not shadow the module-level HTTP `headers` dict.
    fieldnames = ["Project Key", "Project ID", "Project Name", "Project Lead", "totalIssueCount", "lastIssueUpdateTime", "Assigned Workflow", "issueTypes"]
    # Write to CSV
    with open(file_name, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        # Write the header row
        writer.writeheader()
        # Write project data rows
        for project in projects:
            writer.writerow(project)
# Script entry point: pull every project from the instance, then write the
# summary CSV. Note: this runs on import as well as direct execution.
# Fetch all paginated projects
projects = get_paginated_projects()
# Export the data to a CSV file
export_to_csv(projects, "jira_projects_with_lead_insight_workflow_issuetypes.csv")
print(f"Exported {len(projects)} projects to 'jira_projects_with_lead_insight_workflow_issuetypes.csv'")
Was this helpful?