From a042628c73b0727440babf438be4586464f6941e Mon Sep 17 00:00:00 2001 From: Prince Date: Sat, 18 Mar 2023 20:33:37 +0100 Subject: [PATCH 01/17] Update to setup config --- setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index c7ba6a0c..b3b75c73 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,9 @@ "for performing other Jira queries.", long_description=long_description, long_description_content_type="text/markdown", - url="https://github.com/princenyeche/jiraone", + url="https://jiraone.readthedocs.io", + license="MIT License", + keywords="jiraone,Atlassian API,Jira API", packages=setuptools.find_packages(), install_requires=['requests'], classifiers=[ @@ -21,4 +23,6 @@ "Operating System :: OS Independent", ], python_requires='>=3.6', + project_urls={"Issues": "https://github.com/princenyeche/jiraone/issues", + "Github": "https://github.com/princenyeche/jiraone"} ) From a667ada0f9851356bec53da0240cbb4847e099e4 Mon Sep 17 00:00:00 2001 From: Prince Date: Sat, 18 Mar 2023 20:45:02 +0100 Subject: [PATCH 02/17] Changed keyword arg in project_urls --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b3b75c73..6b756293 100644 --- a/setup.py +++ b/setup.py @@ -23,6 +23,6 @@ "Operating System :: OS Independent", ], python_requires='>=3.6', - project_urls={"Issues": "https://github.com/princenyeche/jiraone/issues", - "Github": "https://github.com/princenyeche/jiraone"} + project_urls={"Tracker": "https://github.com/princenyeche/jiraone/issues", + "Source": "https://github.com/princenyeche/jiraone"} ) From d92292ed9f8ebdeaf6408f33c8a8900cfbe33e68 Mon Sep 17 00:00:00 2001 From: Prince Date: Thu, 18 May 2023 15:34:16 +0200 Subject: [PATCH 03/17] Added new methods * Added new methods to the endpoint class * Updated documentation --- jiraone/access.py | 493 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 389 insertions(+), 104 deletions(-) diff --git a/jiraone/access.py b/jiraone/access.py index e32f386c..ce311b85 100644 --- a/jiraone/access.py +++ b/jiraone/access.py @@ -28,12 +28,12 @@ class Credentials(object): auth2_0 = None def __init__( - self, - user: str, - password: str, - url: str = None, - oauth: dict = None, - session: Any = None, + self, + user: str, + password: str, + url: str = None, + oauth: dict = None, + session: Any = None, ) -> None: """ Instantiate the login. @@ -137,9 +137,9 @@ def oauth_session(self, oauth: dict) -> None: "Excepting a dictionary object got {} instead.".format(type(oauth)), ) if ( - "client_id" not in oauth - or "client_secret" not in oauth - or "callback_url" not in oauth + "client_id" not in oauth + or "client_secret" not in oauth + or "callback_url" not in oauth ): add_log( "You seem to be missing a key or keys in your oauth argument.", "debug" @@ -256,7 +256,7 @@ def validate_uri(uri) -> bool: # Check if the supplied url is true to the one which exist in callback_url validate_url = validate_uri(redirect_url.split("?")[0].rstrip("/")) assert ( - validate_url is True + validate_url is True ), "Your URL seems invalid as it cannot be validated." 
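        # The pasted redirect URL carries the authorization code as its
        # second query parameter; it is extracted below and used to build
        # the token request body.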
code = redirect_url.split("?")[1].split("&")[1].split("=")[-1] body = { @@ -326,11 +326,11 @@ def __token_only_session__(self, token: dict) -> None: # produce a session for the script and save the session def token_session( - self, - email: str = None, - token: str = None, - sess: str = None, - _type: str = "Bearer", + self, + email: str = None, + token: str = None, + sess: str = None, + _type: str = "Bearer", ) -> None: """ A session initializer to HTTP request. @@ -605,7 +605,7 @@ class InitProcess(Credentials): dunder method.""" def __init__( - self, user=None, password=None, url=None, oauth=None, session=None + self, user=None, password=None, url=None, oauth=None, session=None ) -> None: """ A Call to the Credential Class. @@ -790,7 +790,7 @@ def get_all_permission_scheme(cls, query: str = None) -> str: @classmethod def get_all_issue_type_schemes( - cls, query: Optional[str] = None, start_at=0, max_results=50 + cls, query: Optional[str] = None, start_at=0, max_results=50 ) -> str: """Returns a paginated list of issue type schemes. Only issue type schemes used in classic projects are returned @@ -856,7 +856,7 @@ def get_all_priorities(cls) -> str: @classmethod def search_all_notification_schemes( - cls, query: Optional[str] = None, start_at=0, max_results=50 + cls, query: Optional[str] = None, start_at=0, max_results=50 ) -> str: """Returns a paginated list of notification schemes ordered by display name. @@ -888,11 +888,11 @@ def search_all_notification_schemes( @classmethod def get_field( - cls, - query: Optional[str] = None, - start_at: int = 0, - max_results: int = 50, - system: str = None, + cls, + query: Optional[str] = None, + start_at: int = 0, + max_results: int = 50, + system: str = None, ) -> str: """Returns a paginated list of fields for Classic Jira projects. The list can include: @@ -973,11 +973,11 @@ def get_attachment_meta_data(cls, query: str, warning: bool = True) -> str: @classmethod def issue_attachments( - cls, - id_or_key: str = None, - attach_id: str = None, - uri: Optional[str] = None, - query: Optional[str] = None, + cls, + id_or_key: str = None, + attach_id: str = None, + uri: Optional[str] = None, + query: Optional[str] = None, ) -> str: """Returns the attachment content. @@ -1093,7 +1093,7 @@ def search_for_filters(cls, query: Optional[str] = None, start_at: int = 0) -> s @classmethod def search_for_dashboard( - cls, query: Optional[str] = None, start_at: int = 0 + cls, query: Optional[str] = None, start_at: int = 0 ) -> str: """Returns a paginated list of dashboards. This operation is similar to @@ -1207,19 +1207,291 @@ def search_for_screen_schemes(cls, query: int = 0) -> str: ) @classmethod - def get_project_component(cls, id_or_key) -> str: + def get_project_component(cls, ids: str = None, + id_or_key: Union[str, int] = None, + move_to: str = None, + issue_count: bool = False, + pagination: bool = False, + query: str = None + ) -> str: """Returns all components in a project. See the Get project components paginated resource if you want to get a full list of components with pagination. The project ID or project key (case-sensitive). + :param ids: A component id (required for GET, PUT, DEL) :param id_or_key: An issue key or id + :param move_to: An id of a component to replace + :param issue_count: Count issues of a component + :param pagination: Allows project component pagination. For pagination + argument, you can combine the query argument to get more details. + :param query: A query parameter for pagination argument. 
+ e.g startAt=0&maxResults=50 + + :request POST: Creates a component. Use components to provide containers + for issues within a project. + :body param: + * assignee - Datatype (User) - denoting a user object + * assigneeType - Datatype (str) + * description - Datatype (str) + * id - Datatype(str) + * isAssigneeTypeValid - Datatype (bool) + * lead - Datatype (User) - denoting a user object + * leadAccountId - Datatype (str) + * leadUserName - Datatype (str) + * name - Datatype (str) + * project - Datatype (str) + * projectId - Datatype (int) + * realAssignee - Datatype (User) + * realAssigneeType - Datatype (str) + * self - Datatype (str) + + Example:: + body = { + "assigneeType": "PROJECT_LEAD", + "description": "This is a Jira component", + "isAssigneeTypeValid": false, + "leadAccountId": "5b10a2844cxxxxxx700ede21g", + "name": "Component 1", + "project": "BAC" + } + + :request GET: Returns a component.This operation can be + accessed anonymously. + + :request PUT: Updates a component. Any fields included in + the request are overwritten + + :body param: + * assignee - Datatype (User) - denoting a user object + * assigneeType - Datatype (str) + * description - Datatype (str) + * id - Datatype(str) + * isAssigneeTypeValid - Datatype (bool) + * lead - Datatype (User) - denoting a user object + * leadAccountId - Datatype (str) + * leadUserName - Datatype (str) + * name - Datatype (str) + * project - Datatype (str) + * projectId - Datatype (int) + * realAssignee - Datatype (User) + * realAssigneeType - Datatype (str) + * self - Datatype (str) + + :request DELETE: Deletes a component. + :query param: moveIssuesTo + :return: A string of the url """ - return "{}/rest/api/{}/project/{}/components".format( - LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key - ) + if ids is not None: + if move_to is not None: + return "{}/rest/api/{}/component/{}?moveIssuesTo={}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids, + move_to + ) + else: + if issue_count is True: + return "{}/rest/api/{}/component/{}/relatedIssueCounts".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + else: + return "{}/rest/api/{}/component/{}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + elif id_or_key is not None: + if pagination is True: + if query is not None: + return "{}/rest/api/{}/project/{}/component?{}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key, + query + ) + else: + return "{}/rest/api/{}/project/{}/component".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key + ) + else: + return "{}/rest/api/{}/project/{}/components".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key + ) + else: + return "{}/rest/api/{}/component".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest" + ) + + @classmethod + def get_project_versions(cls, ids: str = None, + id_or_key: Union[str, int] = None, + move: bool = False, + move_to_issue: str = None, + issue_count: bool = False, + unresolved_count: bool = False, + pagination: bool = False, + swap: bool = False, + query: str = None + ): + """Returns all versions in a project. See the Get project version + paginated. + + resource if you want to get a full list of versions without pagination. + + :param ids: A version id (required for GET, PUT, POST) + :param id_or_key: An issue key or id + :param move: Modifies the version's sequence within the project + :param move_to_issue: The ID of the version to merge into. 
+ :param swap: Deletes a project version. Used with POST method + :param issue_count: Count issues of a version. Used with GET method + :param unresolved_count: Count of a version's unresolved issues. Used with + GET method + :param pagination: Allows project version pagination + :param query: A query parameter for pagination argument. + e.g startAt=0&maxResults=50 + + :request POST: Creates a version. + + :body param: + * archived - Datatype (bool) + * expand - Datatype (str) + * description - Datatype (str) + * id - Datatype(str) + * issueStatusForFixVersion - Datatype (VersionIssueStatus) + * moveUnfixIssuesTo - Datatype (str) + * name - Datatype (str) + * operations - Datatype (list) + * overdue - Datatype (bool) + * project - Datatype (str) + * projectId - Datatype (int) + * releaseDate - Datatype (str) + * release - Datatype (bool) + * startDate - Datatype (str) + * userReleaseDate - Datatype (str) + * userStateDate - Datatype (str) + * self - Datatype (str) + + Example:: + body = { + "archived": false, + "description": "An excellent version", + "name": "New Version 1", + "projectId": 10000, + "releaseDate": "2010-07-06", + "released": true + } + + :request GET: Returns all versions in a project.This operation can be + accessed anonymously. You can either use a return of all version or + use the pagination argument for a paginated list of all versions. + Project key required + + :query param: Used for query argument in pagination + The query argument is a string and can be constructed as + below + query = "startAt=0&maxResults=50&orderBy=description,name&status=released + &expand=issuestatus" + + :request PUT: Updates a version. An id must be supplied + + :body param: + * archived - Datatype (bool) + * expand - Datatype (str) + * description - Datatype (str) + * id - Datatype(str) + * issueStatusForFixVersion - Datatype (VersionIssueStatus) + * moveUnfixIssuesTo - Datatype (str) + * name - Datatype (str) + * operations - Datatype (list) + * overdue - Datatype (bool) + * project - Datatype (str) + * projectId - Datatype (int) + * releaseDate - Datatype (str) + * release - Datatype (bool) + * startDate - Datatype (str) + * userReleaseDate - Datatype (str) + * userStateDate - Datatype (str) + * self - Datatype (str) + + :request POST: Deletes a version. 
+ + :body param: Used with delete and replace version + + * customFieldReplacementList - Datatype (list) + * moveAffectedIssuesTo - Datatype (int) + * moveFixedIssuesTo - Datatype (int) + + Example:: + + body = { + "customFieldReplacementList": [ + { + "customFieldId": 66, + "moveTo": 67 + } + ], + "moveAffectedIssuesTo": 97, + "moveFixIssuesTo": 92 + } + + :body param: Moves a version + + * after - Datatype (str) + * position - - Datatype (str) + + + For pagination argument, you can send + + :return: A string of the url + """ + if ids is not None: + if move is True: + if move_to_issue is not None: + return "{}/rest/api/{}/version/{}/mergeto/{}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids, + move + ) + else: + + return "{}/rest/api/{}/version/{}/move".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + + else: + if issue_count is True: + return "{}/rest/api/{}/version/{}/relatedIssueCounts".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + elif unresolved_count is True: + return "{}/rest/api/{}/version/{}/unresolvedIssueCount".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + else: + if swap is True: + return "{}/rest/api/{}/version/{}/removeAndSwap".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + else: + + return "{}/rest/api/{}/version/{}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", ids + ) + elif id_or_key is not None: + if pagination is True: + if query is not None: + return "{}/rest/api/{}/project/{}/version?{}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key, + query + ) + else: + return "{}/rest/api/{}/project/{}/version".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key + ) + else: + return "{}/rest/api/{}/project/{}/versions".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", id_or_key + ) + else: + return "{}/rest/api/{}/version".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest" + ) @classmethod def get_resolutions(cls) -> str: @@ -1232,7 +1504,7 @@ def get_resolutions(cls) -> str: @classmethod def remote_links( - cls, key_or_id: Optional[str] = None, link_id: Optional[str] = None + cls, key_or_id: Optional[str] = None, link_id: Optional[str] = None ) -> str: """Returns the remote issue links for an issue. 
When a remote issue link global ID is provided @@ -1314,21 +1586,21 @@ def issue_link(cls, link_id: Optional[str] = None) -> str: @classmethod def work_logs( - cls, - key_or_id: Optional[str] = None, - start_at: int = 0, - max_results: int = 1048576, - started_after: int = None, - started_before: int = None, - worklog_id: Optional[str] = None, - expand: Optional[str] = None, - notify_users: Optional[bool] = True, - adjust_estimate: Optional[str] = "auto", - new_estimate: Optional[str] = None, - increase_by: Optional[str] = None, - override_editable_flag: Optional[bool] = False, - reduce_by: Optional[str] = None, - since: Optional[int] = None, + cls, + key_or_id: Optional[str] = None, + start_at: int = 0, + max_results: int = 1048576, + started_after: int = None, + started_before: int = None, + worklog_id: Optional[str] = None, + expand: Optional[str] = None, + notify_users: Optional[bool] = True, + adjust_estimate: Optional[str] = "auto", + new_estimate: Optional[str] = None, + increase_by: Optional[str] = None, + override_editable_flag: Optional[bool] = False, + reduce_by: Optional[str] = None, + since: Optional[int] = None, ) -> str: """Returns worklogs for an issue, starting from the oldest worklog or from the worklog started on or @@ -1716,7 +1988,7 @@ def task(cls, task_id: Optional[str] = None, method: Optional[str] = "GET") -> s @classmethod def issue_watchers( - cls, key_or_id: Optional[str] = None, account_id: Optional[str] = None + cls, key_or_id: Optional[str] = None, account_id: Optional[str] = None ) -> str: """This operation requires the Allow users to watch issues option to be ON. @@ -1791,10 +2063,10 @@ def instance_info(cls): @classmethod def worklog_properties( - cls, - key_or_id: Optional[str] = None, - worklog_id: Optional[str] = None, - property_key: Optional[str] = None, + cls, + key_or_id: Optional[str] = None, + worklog_id: Optional[str] = None, + property_key: Optional[str] = None, ) -> str: """ Returns the worklog properties of an issue @@ -1847,11 +2119,11 @@ def server_info(cls) -> str: @classmethod def project_avatar( - cls, - key_or_id: Optional = None, - avatar_id: Optional = None, - method: Optional = "get", - **kwargs, + cls, + key_or_id: Optional = None, + avatar_id: Optional = None, + method: Optional = "get", + **kwargs, ) -> str: """ Performs multiple operations to the avatar displayed for a project. @@ -2020,7 +2292,7 @@ def create_board(cls) -> str: @classmethod def get_board_by_filter_id( - cls, filter_id, start_at: int = 0, max_results: int = 50 + cls, filter_id, start_at: int = 0, max_results: int = 50 ) -> str: """Returns any boards which use the provided filter id. @@ -2056,7 +2328,7 @@ def get_board(cls, board_id) -> str: @classmethod def get_issues_on_backlog( - cls, board_id, query: str = None, start_at: int = 0, max_results: int = 50 + cls, board_id, query: str = None, start_at: int = 0, max_results: int = 50 ) -> str: """Returns all issues from the board's backlog, for the given board ID. @@ -2094,7 +2366,7 @@ def get_issues_on_backlog( @classmethod def get_issues_on_board( - cls, board_id, query: str = None, start_at: int = 0, max_results: int = 50 + cls, board_id, query: str = None, start_at: int = 0, max_results: int = 50 ) -> str: """Returns all issues from a board, for a given board ID. 
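        A minimal usage sketch (the board id is illustrative and assumes an
        already authenticated ``LOGIN`` session)::

            url = endpoint.get_issues_on_board(4)
            response = LOGIN.get(url)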
@@ -2150,7 +2422,7 @@ def move_issues_to_board(cls, board_id) -> str: @classmethod def get_projects_on_board( - cls, board_id, start_at: int = 0, max_results: int = 50 + cls, board_id, start_at: int = 0, max_results: int = 50 ) -> str: """Returns all projects that are associated with the board, for the given board ID. @@ -2171,7 +2443,7 @@ def get_projects_on_board( @classmethod def get_all_quick_filters( - cls, board_id, start_at: int = 0, max_results: int = 50 + cls, board_id, start_at: int = 0, max_results: int = 50 ) -> str: """Returns all quick filters from a board, for a given board ID. @@ -2206,7 +2478,7 @@ def get_quick_filter(cls, board_id, quick_filter_id) -> str: @classmethod def get_all_sprints( - cls, board_id, query: str = None, start_at: int = 0, max_results: int = 50 + cls, board_id, query: str = None, start_at: int = 0, max_results: int = 50 ) -> str: """Get all Sprint on a Board. @@ -2319,7 +2591,7 @@ def get_server_info(cls) -> str: @classmethod def get_organizations( - cls, start: int = 0, limit: int = 50, account_id: str = None + cls, start: int = 0, limit: int = 50, account_id: str = None ) -> str: """This method returns a list of organizations in the Jira Service Management instance. @@ -2466,7 +2738,7 @@ def remove_users_from_organization(cls, org_id) -> str: @classmethod def get_sd_organizations( - cls, service_desk_id, start: int = 0, limit: int = 50, account_id: str = None + cls, service_desk_id, start: int = 0, limit: int = 50, account_id: str = None ) -> str: """This method returns a list of all organizations associated with a service desk. @@ -2529,7 +2801,7 @@ def remove_sd_organization(cls, service_desk_id) -> str: @classmethod def get_customers( - cls, service_desk_id, start: int = 0, limit: int = 50, query: str = None + cls, service_desk_id, start: int = 0, limit: int = 50, query: str = None ) -> str: """This method returns a list of the customers on a service desk. @@ -2707,11 +2979,11 @@ def group_jira_users(cls, group_name: str, account_id: str = None) -> str: @classmethod def projects( - cls, - id_or_key, - query: Optional[str] = None, - uri: Optional[str] = None, - enable_undo: Optional[bool] = None, + cls, + id_or_key, + query: Optional[str] = None, + uri: Optional[str] = None, + enable_undo: Optional[bool] = None, ) -> str: """Create, delete, update, archive, get status. @@ -2792,11 +3064,11 @@ def projects( @classmethod def issues( - cls, - issue_key_or_id: Optional[Any] = None, - query: Optional[Any] = None, - uri: Optional[str] = None, - event: bool = False, + cls, + issue_key_or_id: Optional[Any] = None, + query: Optional[Any] = None, + uri: Optional[str] = None, + event: bool = False, ) -> str: """Creates issues, delete issues, bulk create issue, transitions. @@ -2918,13 +3190,13 @@ def issues( @classmethod def comment( - cls, - query: str = None, - key_or_id: str = None, - start_at: int = 0, - max_results: int = 50, - ids: int = None, - event: bool = False, + cls, + query: str = None, + key_or_id: str = None, + start_at: int = 0, + max_results: int = 50, + ids: int = None, + event: bool = False, ) -> str: """Create, update, delete or get a comment. @@ -3007,7 +3279,7 @@ def comment( f"{LOGIN.base_url}/rest/api/{'3' if LOGIN.api is True else 'latest'}/issue/{key_or_id}/comment" if event is False else f"{LOGIN.base_url}/rest/api/{'3' if LOGIN.api is True else 'latest'}/issue/{key_or_id}/comment?" 
- f"startAt={start_at}&maxResults={max_results}&{query}" + f"startAt={start_at}&maxResults={max_results}&{query}" ) elif key_or_id is not None and ids is not None: @@ -3021,20 +3293,32 @@ def comment( @classmethod def issue_export( - cls, url: Optional[str] = None, start: int = 0, limit: int = 1000 + cls, query: Optional[str] = None, + start: int = 0, + limit: int = 1000, + fields: str = "all" ) -> str: """ Generate an export of Jira issues using a JQL. - :param url: A JQL of the issues to be exported + :param query: A JQL of the issues to be exported :param start: A start counter :param limit: Max limit allowed for export + :param fields: Determine if export is current fields or all fields. + + .. versionchanged:: 0.7.6 + + fields - added a keyword argument for transition between current and all + fields + + query - Renamed "url" parameter into "query" for better clarity of names. + :return: A string of the export URL """ return ( - "{}/sr/jira.issueviews:searchrequest-csv-all-fields/temp/" + "{}/sr/jira.issueviews:searchrequest-csv-{}-fields/temp/" "SearchRequest.csv?jqlQuery={}&tempMax={}&pager/start={}".format( - LOGIN.base_url, url, limit, start + LOGIN.base_url, fields, query, limit, start ) ) @@ -3080,7 +3364,7 @@ class For(object): """ def __init__( - self, data: Union[list, tuple, dict, set, str, int], limit: int = 0 + self, data: Union[list, tuple, dict, set, str, int], limit: int = 0 ) -> None: self.data = data if isinstance(self.data, int): @@ -3239,6 +3523,7 @@ def get_field(find_field: str = None) -> Any: "key": a["key"], "searchable": a["searchable"], "type": a["schema"]["type"], + "system": a["schema"].get("system"), } return { "name": a["name"], @@ -3260,13 +3545,13 @@ def get_field(find_field: str = None) -> Any: } def update_field_data( - self, - data: Any = None, - find_field: str = None, - field_type: str = "custom", - key_or_id: Union[str, int] = None, - show: bool = True, - **kwargs, + self, + data: Any = None, + find_field: str = None, + field_type: str = "custom", + key_or_id: Union[str, int] = None, + show: bool = True, + **kwargs, ) -> Any: """Field works for. @@ -3457,7 +3742,7 @@ def separated(pull: Any = Any) -> Any: attr = {search["id"]: data} payload = self.data_load(attr) elif ( - options == "add" or options == "remove" + options == "add" or options == "remove" ): # update the field with the desired value if not isinstance(data, list): raise JiraOneErrors("wrong", "Expecting a list of values") @@ -3612,7 +3897,7 @@ def separated(pull: Any = Any) -> Any: attr = {search["id"]: data} payload = self.data_load(attr) elif ( - options == "add" or options == "remove" + options == "add" or options == "remove" ): # update the field with the desired value if not isinstance(data, list): raise JiraOneErrors("wrong") @@ -3742,10 +4027,10 @@ def cascading(data: Any = Any) -> Any: @staticmethod def extract_issue_field_options( - key_or_id: Union[str, int] = None, - search: Dict = None, - amend: str = None, - data: Any = Any, + key_or_id: Union[str, int] = None, + search: Dict = None, + amend: str = None, + data: Any = Any, ) -> Any: """Get the option from an issue. 
From b73f647e911008eed33a6c7c73373ff59dcb2276 Mon Sep 17 00:00:00 2001 From: Prince Date: Thu, 18 May 2023 15:38:51 +0200 Subject: [PATCH 04/17] Update version * Update to version number --- jiraone/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jiraone/__init__.py b/jiraone/__init__.py index 1e8fbd78..e26ced50 100644 --- a/jiraone/__init__.py +++ b/jiraone/__init__.py @@ -36,7 +36,7 @@ from jiraone.management import manage __author__ = "Prince Nyeche" -__version__ = "0.7.5" +__version__ = "0.7.6" __all__ = [ "LOGIN", "endpoint", From 8ce322885d2cfaf414b121024a638e39a8f6dad5 Mon Sep 17 00:00:00 2001 From: Prince Date: Thu, 18 May 2023 15:40:06 +0200 Subject: [PATCH 05/17] Added new function * Added a process executor function * Added regular expression for custom searches --- jiraone/utils.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/jiraone/utils.py b/jiraone/utils.py index c75e0008..a240f286 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -5,6 +5,7 @@ provide additional ability to jiraone. """ import typing as t +import threading class DotNotation(dict): @@ -86,3 +87,31 @@ def __expose_list__(self, value) -> None: value[key] = DotNotation(values) elif isinstance(values, list): self.__expose_list__(values) + + +def process_executor(func: t.Callable, + *, + data: t.Iterable = None, + workers: int = 4, + timeout: t.Union[float, int] = 2.5, + **kwargs) -> None: + """ + A process executor function + + :param func: A function to act upon + :param data: A data that the function processes + :param workers: Number of threads to use and wait until terminates + :param timeout: Specifies a timeout to join threads + :param kwargs: Additional arguments supplied to Thread class + :return: None + """ + process = threading.Thread(target=func, args=(data,), kwargs=kwargs) + process.start() + if threading.active_count() > workers: + process.join(timeout=timeout) + + +# Regular expressions +CUSTOM_FIELD_REGEX = r"(Custom field).+([\(]{1}.+?[\)]{1})$" +CUSTOM_FIELD_REGEX_PLUS = r"(Custom field).+([\(]{1}.+?[\)]{1})\w+" +ISSUE_KEY_REGEX = r"(?:\s|^)([A-Za-z0-9]+-[0-9]+)(?=\s|$)" From b66d7d8d1ece0ca69cafb1f2527419f437cfcc29 Mon Sep 17 00:00:00 2001 From: Prince Date: Thu, 18 May 2023 15:42:24 +0200 Subject: [PATCH 06/17] Added new function * Update to module documentation --- jiraone/utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/jiraone/utils.py b/jiraone/utils.py index a240f286..936c170e 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -99,10 +99,11 @@ def process_executor(func: t.Callable, A process executor function :param func: A function to act upon - :param data: A data that the function processes + :param data: A data that the function processes (arguments) :param workers: Number of threads to use and wait until terminates :param timeout: Specifies a timeout to join threads - :param kwargs: Additional arguments supplied to Thread class + :param kwargs: Additional arguments supplied to Thread class or + the keyword arguments to the function :return: None """ process = threading.Thread(target=func, args=(data,), kwargs=kwargs) From 5d66aab81442493de0e5a6d1d17138048fb239fd Mon Sep 17 00:00:00 2001 From: Prince Date: Thu, 18 May 2023 16:56:05 +0200 Subject: [PATCH 07/17] Removed regex variable * Removed unused regex variable --- jiraone/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/jiraone/utils.py b/jiraone/utils.py index 936c170e..22a2e931 100644 --- a/jiraone/utils.py +++ 
b/jiraone/utils.py @@ -114,5 +114,4 @@ def process_executor(func: t.Callable, # Regular expressions CUSTOM_FIELD_REGEX = r"(Custom field).+([\(]{1}.+?[\)]{1})$" -CUSTOM_FIELD_REGEX_PLUS = r"(Custom field).+([\(]{1}.+?[\)]{1})\w+" ISSUE_KEY_REGEX = r"(?:\s|^)([A-Za-z0-9]+-[0-9]+)(?=\s|$)" From e483af4ffd7e7605b6e9efd45ed216892827b09f Mon Sep 17 00:00:00 2001 From: Prince Date: Sun, 21 May 2023 22:02:57 +0200 Subject: [PATCH 08/17] Update utils.py * Added regex string * DateTime Python string directive * Threading function --- jiraone/utils.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/jiraone/utils.py b/jiraone/utils.py index 22a2e931..dcae9656 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -115,3 +115,13 @@ def process_executor(func: t.Callable, # Regular expressions CUSTOM_FIELD_REGEX = r"(Custom field).+([\(]{1}.+?[\)]{1})$" ISSUE_KEY_REGEX = r"(?:\s|^)([A-Za-z0-9]+-[0-9]+)(?=\s|$)" + +# date utility +dd_MM_YYYY_hh_MM_AM_PM = "%d/%m/%Y %I:%M %p" # dd/MM/YYYY h:mm: AM +dd_MMM_yy_hh_MM_AM_PM = "%d/%b/%y %I:%M %p" # dd/MMM/yy h:mm: AM +dd_MMM_YYYY_hh_MM_SS_AM_PM = "%d-%b-%Y %I:%M:%S %p" # dd-MMM-YYYY h:mm:ss AM +MM_dd_yy_hh_MM_AM_PM = "%m-%d-%y %I:%M %p" # MM-dd-yy h:mm AM +YYYY_MM_dd_hh_MM_SS_AM_PM = "%Y-%m-%d %I:%M:%S %p" # YYYY-MM-dd h:mm:ss AM +dd_MM_YYYY_hh_MM_SS_AM_PM = "%d/%m/%Y %I:%M:%S %p" # dd-MM-YYYY h:mm:ss AM +YYYY_MM_dd_HH_MM_SS_MS_TZ = "%Y-%m-%dT%H:%M:%S.%f%z" # YYYY-MM-ddTHH:mm:ss.s+0 +YYYY_MM_dd_HH_MM_SS_MS = "%Y-%m-%d %H:%M:%S.%f" # YYYY-MM-ddTHH:mm:ss.s From debb1b8fea4a806821b93cae563303471ce7b0c4 Mon Sep 17 00:00:00 2001 From: Prince Date: Mon, 22 May 2023 12:23:55 +0200 Subject: [PATCH 09/17] Update utils.py * Updated date format into a class for better access --- jiraone/utils.py | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/jiraone/utils.py b/jiraone/utils.py index dcae9656..ef2ca4f1 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -6,6 +6,7 @@ """ import typing as t import threading +import enum class DotNotation(dict): @@ -116,12 +117,26 @@ def process_executor(func: t.Callable, CUSTOM_FIELD_REGEX = r"(Custom field).+([\(]{1}.+?[\)]{1})$" ISSUE_KEY_REGEX = r"(?:\s|^)([A-Za-z0-9]+-[0-9]+)(?=\s|$)" + # date utility -dd_MM_YYYY_hh_MM_AM_PM = "%d/%m/%Y %I:%M %p" # dd/MM/YYYY h:mm: AM -dd_MMM_yy_hh_MM_AM_PM = "%d/%b/%y %I:%M %p" # dd/MMM/yy h:mm: AM -dd_MMM_YYYY_hh_MM_SS_AM_PM = "%d-%b-%Y %I:%M:%S %p" # dd-MMM-YYYY h:mm:ss AM -MM_dd_yy_hh_MM_AM_PM = "%m-%d-%y %I:%M %p" # MM-dd-yy h:mm AM -YYYY_MM_dd_hh_MM_SS_AM_PM = "%Y-%m-%d %I:%M:%S %p" # YYYY-MM-dd h:mm:ss AM -dd_MM_YYYY_hh_MM_SS_AM_PM = "%d/%m/%Y %I:%M:%S %p" # dd-MM-YYYY h:mm:ss AM -YYYY_MM_dd_HH_MM_SS_MS_TZ = "%Y-%m-%dT%H:%M:%S.%f%z" # YYYY-MM-ddTHH:mm:ss.s+0 -YYYY_MM_dd_HH_MM_SS_MS = "%Y-%m-%d %H:%M:%S.%f" # YYYY-MM-ddTHH:mm:ss.s +@enum.unique +class DateFormat(enum.StrEnum): + """ + A representation of Python's string directive for + datetime formats + """ + + dd_MM_YYYY_hh_MM_AM_PM = "%d/%m/%Y %I:%M %p" # dd/MM/YYYY h:mm: AM + dd_MMM_yy_hh_MM_AM_PM = "%d/%b/%y %I:%M %p" # dd/MMM/yy h:mm: AM + dd_MMM_YYYY_hh_MM_SS_AM_PM = "%d-%b-%Y %I:%M:%S %p" # dd-MMM-YYYY h:mm:ss AM + MM_dd_yy_hh_MM_AM_PM = "%m-%d-%y %I:%M %p" # MM-dd-yy h:mm AM + YYYY_MM_dd_hh_MM_SS_AM_PM = "%Y-%m-%d %I:%M:%S %p" # YYYY-MM-dd h:mm:ss AM + dd_MM_YYYY_hh_MM_SS_AM_PM = "%d/%m/%Y %I:%M:%S %p" # dd-MM-YYYY h:mm:ss AM + YYYY_MM_dd_HH_MM_SS_MS_TZ = "%Y-%m-%dT%H:%M:%S.%f%z" # YYYY-MM-ddTHH:mm:ss.s+0 + YYYY_MM_dd_HH_MM_SS_MS = "%Y-%m-%d %H:%M:%S.%f" # 
YYYY-MM-ddTHH:mm:ss.s + dd_MM_yy_hh_MM_AM_PM = "%d/%m/%y %I:%M %p" # dd/MM/yy h:mm AM + YYYY_MM_dd_T_HH_MM_SS_MS = "%Y-%m-%dT%H:%M:%S.%f" # YYYY-MM-ddTHH:MM:SS.s + MM_dd_yy_space_hh_MM_AM_PM = "%m/%d/%y %I:%M %p" # MM/dd/yy h:mm AM + dd_MM_YYYY_space_hh_MM_AM_PM = "%d/%m/%Y %I:%M %p" # dd/MM/YYYY h:mm AM + MMM_dd_YYYY_hh_MM_SS_AM_PM = "%b %d, %Y %I:%M:%S %p" # MMM dd, YYYY h:mm:ss AM + MM_dd_YYYY_hh_MM_AM_PM = "%m/%d/%Y %I:%M %p" # MM/dd/YYYY h:mm AM From 1b5d05cbfbadabd06da73312435c960e20b00e02 Mon Sep 17 00:00:00 2001 From: Prince Date: Mon, 22 May 2023 12:45:11 +0200 Subject: [PATCH 10/17] Update utils.py * Made changes to the date format using Enum class --- jiraone/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jiraone/utils.py b/jiraone/utils.py index ef2ca4f1..e66cd418 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -119,8 +119,7 @@ def process_executor(func: t.Callable, # date utility -@enum.unique -class DateFormat(enum.StrEnum): +class DateFormat(enum.Enum): """ A representation of Python's string directive for datetime formats @@ -140,3 +139,4 @@ class DateFormat(enum.StrEnum): dd_MM_YYYY_space_hh_MM_AM_PM = "%d/%m/%Y %I:%M %p" # dd/MM/YYYY h:mm AM MMM_dd_YYYY_hh_MM_SS_AM_PM = "%b %d, %Y %I:%M:%S %p" # MMM dd, YYYY h:mm:ss AM MM_dd_YYYY_hh_MM_AM_PM = "%m/%d/%Y %I:%M %p" # MM/dd/YYYY h:mm AM + dd_MMM_yy = "%d/%b/%y" # dd/MMM/yy From bd60e2ecfaf6273c42c883c929e7aada413c562b Mon Sep 17 00:00:00 2001 From: Prince Date: Mon, 22 May 2023 12:56:02 +0200 Subject: [PATCH 11/17] Update utils.py * Removed Enum class for string usability --- jiraone/utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/jiraone/utils.py b/jiraone/utils.py index e66cd418..2c21df8c 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -6,7 +6,6 @@ """ import typing as t import threading -import enum class DotNotation(dict): @@ -119,7 +118,7 @@ def process_executor(func: t.Callable, # date utility -class DateFormat(enum.Enum): +class DateFormat: """ A representation of Python's string directive for datetime formats From 4c2975d066df94327c6443647e6e9d3bfcb71dc5 Mon Sep 17 00:00:00 2001 From: Prince Date: Mon, 22 May 2023 21:10:43 +0200 Subject: [PATCH 12/17] Update access.py * Added new methods to the access module --- jiraone/access.py | 77 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 3 deletions(-) diff --git a/jiraone/access.py b/jiraone/access.py index ce311b85..5f792ded 100644 --- a/jiraone/access.py +++ b/jiraone/access.py @@ -654,17 +654,25 @@ def myself(cls) -> str: ) @classmethod - def search_users(cls, query: int = 0, max_result: int = 50) -> str: + def search_users(cls, start_at: int = 0, max_result: int = 50, + default: bool = False) -> str: """Search multiple users and retrieve the data - :param query: An integer record row + :param start_at: An integer record row :param max_result: An integer of max capacity + :param default: Changes context between default user search + and all search + :return: A string of the url """ + if default is True: + return "{}/rest/api/{}/users?startAt={}&maxResults={}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", start_at, max_result + ) return "{}/rest/api/{}/users/search?startAt={}&maxResults={}".format( - LOGIN.base_url, "3" if LOGIN.api is True else "latest", query, max_result + LOGIN.base_url, "3" if LOGIN.api is True else "latest", start_at, max_result ) @classmethod @@ -1584,6 +1592,69 @@ def issue_link(cls, link_id: Optional[str] = None) -> 
str: LOGIN.base_url, "3" if LOGIN.api is True else "latest" ) + @classmethod + def issue_link_types(cls, link_type_id: Optional[str] = None, + ) -> str: + """ + Returns a list of all issue link types. + + :request GET: To use this operation, the site must + have issue linking enabled. + + :request GET: Get issue link types, this requires the + linked type id. + + :request POST: Creates an issue link type. Use this + operation to create descriptions of the reasons + why issues are linked. + + :body param: + * id - Datatype (str) + * inward - Datatype (str) + * name - Datatype (str) + * outward - Datatype (str) + * self - Datatype (str) + + Example:: + payload = { + "inward": "Duplicated by", + "name": "Duplicate", + "outward": "Duplicates" + } + + :request DELETE: Deletes an issue link type. + This requires the linked type id + + :request PUT: Updates an issue link type. + This requires the linked type id + + :body param: + * id - Datatype (str) + * inward - Datatype (str) + * name - Datatype (str) + * outward - Datatype (str) + * self - Datatype (str) + + Example:: + payload = { + "inward": "Duplicated by", + "name": "Duplicate", + "outward": "Duplicates" + } + + :param link_type_id: The link type id + + :return: + """ + if link_type_id: + return "{}/rest/api/{}/issueLinkType/{}".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest", link_type_id + ) + else: + return "{}/rest/api/{}/issueLinkType".format( + LOGIN.base_url, "3" if LOGIN.api is True else "latest" + ) + @classmethod def work_logs( cls, From 7bbd3d2bd872e4564267b9318a75749c2d2f8447 Mon Sep 17 00:00:00 2001 From: Prince Date: Mon, 22 May 2023 21:13:42 +0200 Subject: [PATCH 13/17] Update utils.py * Added more regular expression --- jiraone/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/jiraone/utils.py b/jiraone/utils.py index 2c21df8c..3b86e2ca 100644 --- a/jiraone/utils.py +++ b/jiraone/utils.py @@ -115,6 +115,8 @@ def process_executor(func: t.Callable, # Regular expressions CUSTOM_FIELD_REGEX = r"(Custom field).+([\(]{1}.+?[\)]{1})$" ISSUE_KEY_REGEX = r"(?:\s|^)([A-Za-z0-9]+-[0-9]+)(?=\s|$)" +INWARD_ISSUE_LINK = r"(Inward issue link).+([\(]{1}.+?[\)]{1})$" +OUTWARD_ISSUE_LINK = r"(Outward issue link).+([\(]{1}.+?[\)]{1})$" # date utility From df87d2d29dfe4cf263d65e18b847435950aa4f68 Mon Sep 17 00:00:00 2001 From: Prince Date: Sat, 27 May 2023 09:12:23 +0200 Subject: [PATCH 14/17] Update reporting.py * Added new arguments for `issue_export()` * Now possible to export in JSON format --- jiraone/reporting.py | 2839 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 2478 insertions(+), 361 deletions(-) diff --git a/jiraone/reporting.py b/jiraone/reporting.py index 9898ca8b..218eed38 100644 --- a/jiraone/reporting.py +++ b/jiraone/reporting.py @@ -20,12 +20,12 @@ class Projects: @staticmethod def projects_accessible_by_users( - *args: str, - project_folder: str = "Project", - project_file_name: str = "project_file.csv", - user_extraction_file: str = "project_extract.csv", - permission: str = "BROWSE", - **kwargs, + *args: str, + project_folder: str = "Project", + project_file_name: str = "project_file.csv", + user_extraction_file: str = "project_extract.csv", + permission: str = "BROWSE", + **kwargs, ) -> None: """ Send an argument as String equal to a value, example: status=live. 
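        A minimal usage sketch (the argument value is illustrative and
        assumes an already authenticated ``LOGIN`` session)::

            PROJECT.projects_accessible_by_users("status=live",
                                                 permission="BROWSE")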
@@ -117,8 +117,8 @@ def project(): ] project() elif ( - "totalIssueCount" in insight - and "lastIssueUpdateTime" not in insight + "totalIssueCount" in insight + and "lastIssueUpdateTime" not in insight ): raw = [ keys, @@ -151,9 +151,9 @@ def project(): @staticmethod def dashboards_shared_with( - dashboard_folder: str = "Dashboard", - dashboard_file_name: str = "dashboard_file.csv", - **kwargs, + dashboard_folder: str = "Dashboard", + dashboard_file_name: str = "dashboard_file.csv", + **kwargs, ) -> None: """ Retrieve the Dashboard Id/Name/owner and who it is shared with. @@ -244,10 +244,10 @@ def dash_run(): @staticmethod def get_all_roles_for_projects( - roles_folder: str = "Roles", - roles_file_name: str = "roles_file.csv", - user_extraction: str = "role_users.csv", - **kwargs, + roles_folder: str = "Roles", + roles_file_name: str = "roles_file.csv", + user_extraction: str = "role_users.csv", + **kwargs, ) -> None: """ Get the roles available in a project and which user is assigned to which @@ -374,10 +374,10 @@ def pull_data() -> None: add_log("File extraction completed", "info") def get_attachments_on_projects( - self, - attachment_folder: str = "Attachment", - attachment_file_name: str = "attachment_file.csv", - **kwargs, + self, + attachment_folder: str = "Attachment", + attachment_file_name: str = "attachment_file.csv", + **kwargs, ) -> None: """Return all attachments of a Project or Projects @@ -631,13 +631,13 @@ def bytes_converter(val) -> str: @staticmethod def move_attachments_across_instances( - attach_folder: str = "Attachment", - attach_file: str = "attachment_file.csv", - key: int = 3, - attach: int = 8, - file: int = 6, - last_cell: bool = True, - **kwargs, + attach_folder: str = "Attachment", + attach_file: str = "attachment_file.csv", + key: int = 3, + attach: int = 8, + file: int = 6, + last_cell: bool = True, + **kwargs, ) -> None: """Ability to post an attachment into another Instance. @@ -720,12 +720,12 @@ def move_attachments_across_instances( @staticmethod def download_attachments( - file_folder: str = None, - file_name: str = None, - download_path: str = "Downloads", - attach: int = 8, - file: int = 6, - **kwargs, + file_folder: str = None, + file_name: str = None, + download_path: str = "Downloads", + attach: int = 8, + file: int = 6, + **kwargs, ) -> None: """Download the attachments to your local device read from a csv file. @@ -779,7 +779,7 @@ def download_attachments( @staticmethod def get_total_comments_on_issues( - folder: str = "Comment", file_name: str = "comment_file.csv", **kwargs + folder: str = "Comment", file_name: str = "comment_file.csv", **kwargs ) -> None: """Return a report with the number of comments sent to or by a reporter (if any). @@ -994,34 +994,40 @@ def write_result() -> None: @staticmethod def change_log( - folder: str = "ChangeLog", - file: str = "change_log.csv", - back_up: bool = False, - allow_cp: bool = True, - **kwargs: Union[str, bool], + folder: str = "ChangeLog", + file: str = "change_log.csv", + back_up: bool = False, + allow_cp: bool = True, + **kwargs: Union[str, bool], ) -> None: """Extract the issue history of an issue. - Query the changelog endpoint if using cloud instance or straight away define access to it on server. + Query the changelog endpoint if using cloud instance or + straight away define access to it on server. Extract the histories and export it to a CSV file. 
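        A minimal usage sketch (the JQL below is illustrative and assumes
        an already authenticated ``LOGIN`` session)::

            PROJECT.change_log(jql="project = ABC ORDER BY created DESC")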
:param folder: A name of a folder datatype String :param file: A name of a file datatype String - :param back_up: A boolean to check whether a history file is exist or not. + :param back_up: A boolean to check whether a history file + is exist or not. :param allow_cp: Allow or deny the ability to have a checkpoint. :param kwargs: The other kwargs that can be passed as below. - * jql: (required) A valid JQL query for projects or issues. datatype -> string + * jql: (required) A valid JQL query for projects or issues. + datatype -> string - * saved_file: The name of the file which saves the iteration. datatype -> string + * saved_file: The name of the file which saves the iteration. + datatype -> string - * show_output: Show a printable output on terminal. datatype -> boolean + * show_output: Show a printable output on terminal. + datatype -> boolean - * field_name: Target a field name to render. datatype -> string + * field_name: Target a field name to render. + datatype -> string :return: None """ @@ -1124,7 +1130,7 @@ def re_instantiate(val: str) -> None: endpoint.issues( issue_key_or_id=val, query="expand=renderedFields,names,schema,operations," - "editmeta,changelog,versionedRepresentations", + "editmeta,changelog,versionedRepresentations", ) ) if get_issue_keys.status_code == 200: @@ -1189,7 +1195,7 @@ def re_instantiate(val: str) -> None: re_instantiate(keys) def changelog_history( - history: Any = Any, proj: tuple = (str, str, str) + history: Any = Any, proj: tuple = (str, str, str) ) -> None: """Structure the change history data after being retrieved. @@ -1392,8 +1398,8 @@ def render_history(past): loop: bool = False if allow_cp is True: if ( - os.path.isfile(path_builder(folder, file_name=saved_file)) - and os.stat(path_builder(folder, file_name=saved_file)).st_size != 0 + os.path.isfile(path_builder(folder, file_name=saved_file)) + and os.stat(path_builder(folder, file_name=saved_file)).st_size != 0 ): user_input = input( "An existing save point exist from your last extraction, " @@ -1473,11 +1479,11 @@ def render_history(past): ) if allow_cp is True else None def comment_on( - self, - key_or_id: str = None, - comment_id: int = None, - method: str = "GET", - **kwargs, + self, + key_or_id: str = None, + comment_id: int = None, + method: str = "GET", + **kwargs, ) -> Any: """Comment on a ticket or write on a description field. @@ -1813,22 +1819,24 @@ def mention(self, content): @staticmethod def export_issues( - *, - folder: Optional[str] = "EXPORT", - jql: Optional[str] = None, - page: Optional[tuple] = None, - **kwargs: Union[str, dict], + *, + folder: Optional[str] = "EXPORT", + jql: str = None, + page: Optional[tuple] = None, + **kwargs: Any, ) -> None: """ - Exports all Jira issue based on JQL search. If the number of issues - returned is greater than 1K issues, all the issues are finally - combined into a single file as output. + Exports all Jira issue either in CSV or JSON format based on JQL + search. If the number of issues returned is greater than 1K issues, + all the issues are finally combined into a single file as output. :param folder: The name of a folder - :param jql: A valid JQL + :param jql: A valid JQL (required) if ``merge_file`` args is not + provided - :param page: An iterative counter for page index + :param page: An iterative counter for page index denoting the + pagination for JQl search :param kwargs: Additional arguments that can be supplied. @@ -1851,7 +1859,7 @@ def export_issues( When used as a string, just supply the instance baseURL as string only. 
- .. code-block:: python + Example:: # previous expression base = "https://yourinstance.atlassian.net" @@ -1860,7 +1868,7 @@ def export_issues( Example of dict construct which can be stored as a ``.json`` file. - .. code-block:: json + Example:: { "user": "prince@example.com", @@ -1877,11 +1885,212 @@ def export_issues( first before it can become useful. * encoding: Datatype (str) Ability to alter the encoding - of the exported data to file_writer function. + of the exported data to ``file_writer`` function. * errors: Datatype (str) Ability to alter the error type used in encoding argument if the encoded character fails to decode. + * extension: Datatype (str) Ability to export the issues in + either CSV or JSON format. e.g. options are "csv" or "json" + + Example:: + # previous statements + PROJECT.export_issues(jql=jql, extension="csv") + + * field_type: Datatype (str) Ability to define if all fields + or default fields are exported. e.g. options are "all" or + "current". The default is set to "all" for every field.If you + want to export only default fields, set it to "current". + This will export the default field your current users has + defined on the UI. + + Example:: + # previous statements + my_current_field = "current" + PROJECT.export_issues(jql=jql, field_type=my_current_field) + + * exclude_fields: Datatype (list) Ability to exclude certain + fields from the exported file. This field must be an exact + string of the custom field name. This argument cannot be used + when ``include_fields`` args is not empty + + Example:: + # previous statements + fields = ["Labels", "Comment", "SupportTeam"] + PROJECT.export_issues(jql=jql, exclude_fields=fields) + + * include_fields: Datatype (list) Ability to include certain + fields from the exported file. This field must be an exact string + of the custom field name. This argument cannot be used when + ``exclude_fields`` is not empty. + + Example:: + # previous statements + fields = ["Summary", "Comment", "SupportTeam"] + PROJECT.export_issues(jql=jql, include_fields=fields) + + * workers: Datatype (int) Ability to use process workers for + faster iterations. This helps during http request to + endpoints. By default, 4 threads are put into motion + + Example:: + # previous statement + workers = 20 + PROJECT.export_issues(jql=jql, workers=workers) + + * is_sd_internal: Datatype (bool) Ability to add additional + properties to a JSON comment export for JSM projects. + This argument expects that a comment field column must include + an addition string attribute as "true" or "false" specifically + tailored for JSM projects. + + Example:: + # Given the below is a CSV row of a comment field + "25/Apr/22 11:15 AM; + 557058:f58131cb-b67d-43c7-b30d-6b58d40bd077; + Hello this work;true" + + The last value there "true" will determine the visibility of + a comment on a JSM project import. + + * merge_files: Datatype (list) Ability to combine various CSV + files not necessarily Jira related into a single CSV file. + You can supply the filename in a list e.g. + + Example:: + # previous statements + my_files = ["file1.csv", "file2.csv", file3.csv"] + PROJECT.export_issues(merge_files=my_files) + + When merge_files argument is used, it overrides other + arguments such as jql, page, encoding and errors. + Please ensure that these files are in the same directory as + indicated by the ``folder`` argument + + * csv_to_json: Datatype (str) Ability to provide a CSV Jira + export to be converted to JSON format. 
This argument expects + the name of the CSV file name. It expects a "Project key" + column to be included in the CSV file. + + * timeout: Datatype (float or int) Ability to increase the + timeout period required for the ``workers`` argument. If you + do increase the ``workers`` argument, you have to set a + plausible timeout that allows all thread to join + and finish executing to prevent errors e.g. KeyError + + * json_properties: Datatype (list) Ability to add additional + properties to the JSON export option such as users or links + objects. Examples of valid properties: users, links and + history + + Example:: + # previous statements + props = ["users", "links"] + jql = "project in (ABC, IT)" + PROJECT.export_issues(jql=jql, extension="json", + json_properties=props) + + * check_auth: Datatype (bool) Ability to turn off or on the + authentication check that the export function uses. Only + useful when ``merge_files`` argument is used alone. + + Example:: + # previous statements + my_files = ["file1.csv", "file2.csv", file3.csv"] + PROJECT.export_issues(merge_files=my_files, + check_auth=False) + + * date_format: Datatype (str) Ability to use certain date + pattern to parse datetime Jira fields. Useful for datetime + custom field + + Example:: + + # previous statements + date_pattern = "%d/%m/%Y %I:%M %p" + # The above would translate into dd/MM/YYYY 09:14 AM + jql = "project in (ABC, IT)" + PROJECT.export_issues(jql=jql, extension="json", + date_format=date_pattern) + + * json_custom_type: Datatype (list) Ability to exclude + certain customType from the JSON export. The name has to + be an exact string or unique starting string of the custom + type. By default, this argument omits two custom type + of which one of such custom type is given in the below + example + + Example:: + + # previous statements + _type = ["com.atlassian.plugins.atlassian-connect-plugin"] + # The above is referring to the custom field type + jql = "project in (ABC, IT)" + PROJECT.export_issues(jql=jql, extension="json", + json_custom_type=_type) + + * is_cache: Datatype (bool) Ability to save frequently used + http call if the same request is done within a given amount + of time for faster iterations. + + * use_cache: Datatype (bool) Ability to use cached http call + object at will. This allows the use of previously saved + objects. If none exist, a new http call is made and the data + is saved as an JSON object used as cache. + + * is_cache_filename: Datatype (str) Ability to name the file + used to store the cached data + + * expires: Datatype (int) Ability to add an expiry timeframe + to the ``is_cache`` argument expressed in seconds, + which allows caching to be recalled or valid over a period + of time in seconds. + + Example:: + + # previous statements + expiry_time = 3600 # number of seconds + jql = "project in (ABC, IT)" + PROJECT.export_issues(jql=jql, extension="json", + expires=expiry_time, is_cache=True) + + * allow_media: Datatype (bool) Ability to add a user + credential to each attachment uri of the "Attachment" column + of a CSV export + + * sub_tasks: Datatype (list) Ability to identify all the + sub-tasks issues present in a JSON export. 
Useful when you + want to provide issue links between a parent and child issues + + * project_type - Datatype(dict) Ability to provide a project + template for JSON creation based on the project type + + Example:: + + # previous expression + template = { + "software": + "com.pyxis.greenhopper.jira:gh-simplified-scrum-classic"} + PROJECT.export_issues(jql=jql, extension="json", + project_type=template) + + * workflows - Datatype(dict) Ability to provide a map of project + key to workflow scheme name that exist on your destination + instance. + + Example:: + + # previous expression + # Where ITSEC is the project key + workflow = {"ITSEC":"Software Simplified Workflow Scheme"} + PROJECT.export_issues(jql=jql, extension="json", + workflows=workflow) + + * flush - Datatype ( float or int) Ability to set a delay + period required for running threads to shut down. Required + for history extraction. + + .. versionchanged:: 0.7.4 encoding: added keyword argument which helps determine how encoding @@ -1890,31 +2099,111 @@ def export_issues( errors: added keyword argument which helps determine decoding errors are handled + .. versionchanged:: 0.7.6 + + extension: added keyword argument to determine file export format in + CSV or JSON format. + + field_type: added keyword argument to specify if export should contain + all fields or default fields + + exclude_fields: added keyword argument to exclude certain fields from + the exported data in CSV + + workers: added keyword argument to indicate how many threads to use + at a go when making http request + + is_sd_internal: added keyword argument to add additional properties to + a comment field in JSON format. This argument expects that a comment + field must include an addition string attribute as "true" or "false" + specifically tailored for JSM projects. + + merge_files: added keyword argument to merge series of CSV files into + one which are located in the same director fed as a list of file names + + csv_to_json: added keyword argument when supplied a dir path to Jira CSV + file, auto converts to Jira JSON form + + timeout: added keyword argument, used in conjunction with the + ``workers`` argument for threads wait time. + + json_properties: added keyword argument used in JSON export to include + other attributes to the exported file. + + check_auth: added keyword argument used as a flag to turn on or + off Jira auth validation error + + include_fields: added keyword argument used to include only certain + Jira fields in a CSV export. + + date_format: added keyword argument used to parse datetime custom + fields + + json_custom_type: added keyword argument used to exclude certain + custom field type from being added to the export list in JSON + format. + + is_cache: added keyword argument used to cache dict or list result + objects that have used some http request in the past for faster lookup. + + use_cache: added keyword argument used to allow the use of cached + objects. + + is_cache_filename: added keyword argument to name the file used to + store cached data + + expires: added keyword argument used in conjunction to ``is_cache`` + argument to allow the caching to be valid over a given period of time. + + allow_media: added keyword argument for allowing auth to be added to + each media file. 
+ + sub_tasks: added keyword argument for identify sub-task names + + project_type: added keyword argument for project template types + + workflows: added keyword argument for project workflow scheme names + + flush: added keyword argument for delay timing when threads are + still running prior. + :return: None + :raises: IndexError, AttributeError, KeyError, TypeError, ValueError + JiraOneErrors """ from jiraone.exceptions import JiraOneErrors - from jiraone.utils import DotNotation + from jiraone.utils import DotNotation, CUSTOM_FIELD_REGEX, \ + process_executor, DateFormat as df, INWARD_ISSUE_LINK, \ + OUTWARD_ISSUE_LINK from copy import deepcopy from jiraone import field + from datetime import datetime, timedelta import shutil + import random + from time import sleep - reason = LOGIN.get(endpoint.myself()) - if reason.status_code > 300: - add_log( - "Authentication failed.Please check your credential " - "data to determine " - "what went wrong with reason: {} & code {}".format( - reason.reason, reason.status_code - ), - "error", - ) - raise JiraOneErrors( - "login", - "Authentication failed. " - "Please check your credentials." - " Reason: {}".format(reason.reason), - ) + check_auth: bool = ( + kwargs["check_auth"] if "check_auth" in kwargs else True + ) + + if check_auth is True: + reason = LOGIN.get(endpoint.myself()) + if reason.status_code > 300: + add_log( + "Authentication failed.Please check your credential " + "data to determine " + "what went wrong with reason: {} & code {}".format( + reason.reason, reason.status_code + ), + "error", + ) + raise JiraOneErrors( + "login", + "Authentication failed. " + "Please check your credentials." + " Reason: {}".format(reason.reason), + ) # check if the target instance is accessible source: str = LOGIN.base_url target: Union[str, dict] = kwargs["target"] if "target" in kwargs else "" @@ -1930,124 +2219,399 @@ def export_issues( final_file: str = ( kwargs["final_file"] if "final_file" in kwargs else "final_file.csv" ) - encoding: str = kwargs["encoding"] if "encoding" in kwargs else "utf-8" - errors: str = kwargs["errors"] if "errors" in kwargs else "replace" + encoding: str = ( + kwargs["encoding"] if "encoding" in kwargs else "utf-8" + ) + errors: str = ( + kwargs["errors"] if "errors" in kwargs else "replace" + ) + extension: str = ( + kwargs["extension"] if "extension" in kwargs else "csv" + ) + field_type: str = ( + kwargs["field_type"] if "field_type" in kwargs else "all" + ) + exclude_fields: list = ( + kwargs["exclude_fields"] if "exclude_fields" in kwargs else [] + ) + + workers: int = ( + kwargs["workers"] if "workers" in kwargs else 4 + ) + is_sd_internal: bool = ( + kwargs["is_sd_internal"] if "is_sd_internal" in kwargs else False + ) + # This should override live download of the Jira CSV file merge + merge_files: list = ( + kwargs["merge_files"] if "merge_files" in kwargs else [] + ) + # This should override live or supplied merge file and simply process + # the JSON output portion + csv_to_json: str = ( + kwargs["csv_to_json"] if "csv_to_json" in kwargs else "" + ) + + timeout: Union[float, int] = ( + kwargs["timeout"] if "timeout" in kwargs else 2.5 + ) + + json_properties: list = ( + kwargs["json_properties"] if "json_properties" in kwargs else [] + ) + + # Should not be used when ``exclude_fields`` is not empty + include_fields: list = ( + kwargs["include_fields"] if "include_fields" in kwargs else [] + ) + + date_format: str = ( + kwargs["date_format"] if "date_format" in + kwargs else df.dd_MMM_yy_hh_MM_AM_PM + ) + + 
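        # The date_format default above is Jira's "dd/MMM/yy h:mm AM/PM"
        # display style, e.g. "25/Apr/22 11:15 AM".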
json_custom_type: list = ( + kwargs["json_custom_type"] if + "json_custom_type" in kwargs else + ["ari:cloud:ecosystem::extension", + "com.atlassian.plugins.atlassian-connect-plugin"] + ) + + is_cache: bool = ( + kwargs["is_cache"] if "is_cache" in + kwargs else True + ) + + use_cache: bool = ( + kwargs["use_cache"] if "use_cache" in + kwargs else False + ) + + is_cache_filename: str = ( + kwargs["is_cache_filename"] if "is_cache_filename" in + kwargs else "j1_config.json" + ) + + expires: int = ( + kwargs["expires"] if "expires" in + kwargs else 3600 + ) + + allow_media: bool = ( + kwargs["allow_media"] if "allow_media" in + kwargs else False + ) + + sub_tasks: list = ( + kwargs["sub_tasks"] if "sub_tasks" in + kwargs else ["Sub-task"] + ) + + project_type: dict = ( + kwargs["project_type"] if "project_type" in + kwargs else { + "software": "com.pyxis.greenhopper.jira:gh-simplified-scrum-classic", + "service_desk": "com.atlassian.servicedesk:simplified-it-service-management", + "business": "com.atlassian.jira-core-project-templates:jira-core-simplified-procurement"} + ) + + workflows: dict = ( + kwargs["workflows"] if "workflows" in + kwargs else {} + ) + + flush: Union[float, int] = ( + kwargs["flush"] if "flush" in + kwargs else 3 + ) # stores most configuration data using a dictionary config = {} - # Checking that the arguments are passing correct data structure. - if not isinstance(_field_names_, list): - add_log( - "The `fields` argument seems to be using the wrong " - "data structure {}" - "expecting a list of items.".format(_field_names_), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `fields` argument should be a " - "list of field names. " - "Detected {} instead.".format(type(_field_names_)), - ) - if not isinstance(target, (str, dict)): - add_log( - "The `target` argument seems to be using the wrong data " - "structure {}" - "expecting a dictionary or a string.".format(target), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `target` argument should be " - "a dictionary of auth items " - "or a string of the url" - "Detected {} instead.".format(type(target)), - ) - if not isinstance(temp_file, str): - add_log( - "The `temp_file` argument seems to be using the wrong " - "data structure {}" - "expecting a string.".format(temp_file), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `temp_file` argument should be " - "a string of the file name." - "Detected {} instead.".format(type(temp_file)), - ) - if not isinstance(final_file, str): - add_log( - "The `final_file` argument seems to be using the wrong " - "data structure {}" - "expecting a string.".format(final_file), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `final_file` argument should be " - "a string of the file name." - "Detected {} instead.".format(type(final_file)), - ) - if not isinstance(jql, str): - add_log( - "The `jql` argument seems to be using the wrong data " - "structure {}" - "expecting a string.".format(jql), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `jql` argument should be a " - "string of a valid Jira query." - "Detected {} instead.".format(type(jql)), - ) - if not isinstance(encoding, str): - add_log( - "The `encoding` argument seems to be using the wrong data " - "structure {}" - "expecting a string.".format(encoding), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `encoding` argument should be a " - "string of a character encoding " - "e.g utf-8." 
- "Detected {} instead.".format(type(encoding)), - ) - if not isinstance(errors, str): - add_log( - "The `errors` argument seems to be using the wrong data " - "structure {}" - "expecting a string.".format(errors), - "error", - ) - raise JiraOneErrors( - "wrong", - "The `errors` argument should be a " - "string of a character encoding " - "exception e.g. replace." - "Detected {} instead.".format(type(errors)), - ) - if page is None: - pass - elif page is not None: - if not isinstance(page, tuple): + def validate_on_error(name_field: Any = None, + data_type: tuple = None, + err_message: str = None) -> None: + """ + Validate an argument and prepares an error response + + :param name_field: An argument field name + + :param data_type: The data type of the argument, it expects + a datatype object, the name of the argument and a message + which explains the expected object of the argument + + :param err_message: Expected error message + + :return: None + """ + if not isinstance(name_field, data_type[0]): add_log( - "The `page` argument seems to be using the wrong data" - " structure {}" - "expecting a tuple.".format(page), + 'The `{}` argument seems to be using the wrong ' + 'data structure "{}" as value, ' + 'expecting {}.'.format(data_type[1], + name_field, data_type[2]), "error", ) raise JiraOneErrors( "wrong", - "The `page` argument should be a " - "tuple to determine valid page " - "index." - "Detected {} instead.".format(type(page)), + "The `{}` argument should be " + "{}." + "Detected {} instead.".format(data_type[1], err_message, + type(name_field)), + ) + + # Checking that the arguments are passing correct data structure. + def field_value_check( + param_field: list = None, + attr: bool = False, + attr_plus: bool = False + ) -> None: + """ + Helps to perform validation check for ``fields``, + ``exclude_fields`` and ``include_fields`` keyword argument. 
+ + :param param_field: keyword argument names + + :param attr: determines the context for `param_field` argument + value + + :param attr_plus: Adds context for a 3rd parameter + + :return: None + """ + validate_on_error(param_field, (list, + "fields" if attr is False + and attr_plus is False + else "exclude_fields" if attr is True + and attr_plus is False + else "include_fields", + "a list of items"), + "a list of field names in Jira") + + # validate each field name in the list provided + if param_field: + if isinstance(param_field, list): + is_valid = [] + + def map_field(fname: str) -> None: + """Processes an object of a field value + :param fname: A Jira field name + :return: None + """ + + mapper = field.get_field(fname) + if mapper is not None: + _data = mapper.get("name") + config["map_list"].add(_data) + + for item_field in param_field: + process_executor( + map_field, + data=item_field, + workers=1 + ) + + for check_field in param_field: + if check_field not in config["map_list"]: + is_valid.append(check_field) + + if len(is_valid) > 0: + add_log( + 'The following name(s) "{}" in the field value list ' + "doesn't seem to exist or cannot be found.".format( + ",".join(is_valid) + ), + "error", + ) + raise JiraOneErrors( + "value", + "Unable to find initial field, probably such field" + ' "{}" doesn\'t exist for {} argument'.format(",".join(is_valid), + ( + "fields" if attr is False and + attr_plus is False + else "exclude_fields" + if attr is True and attr_plus is False + else "include_fields") + )) + + config["map_list"].clear() + + config["map_list"] = set() + field_value_check(_field_names_) # We'll always check this + field_value_check(exclude_fields, True) + field_value_check(include_fields, False, True) + validate_on_error(target, ((str, dict), "target", + "a dictionary or a string"), + "a dictionary of auth items " + "or a string of the url") + validate_on_error(temp_file, (str, "temp_file", + "a string"), + "a string of the file name") + validate_on_error(final_file, (str, "final_file", + "a string"), + "a string of the file name") + + def check_field_membership( + param_field: str = None, + attr: bool = False, + value_option: list = None + ) -> None: + """ + Checks if an argument is passing the right + data + + :param param_field: A keyword value argument + :param attr: A context decision for param_field value + :param value_option: A membership value to check + :return: None + """ + validate_on_error(param_field, (str, + "field_type" if attr + is False else "extension", + "a string"), + "a string of the query field configuration") + if isinstance(param_field, str): + if param_field.lower() not in value_option: + add_log( + 'The `{}` argument seems to be using the wrong ' + 'option value "{}"' + ' expecting either "{}" or "{}" as option.'.format( + "field_type" if attr is False else "extension", + param_field, + value_option[0], + value_option[1] + ), + 'error', + ) + raise JiraOneErrors( + "wrong", + 'Unrecognized option value in "{}" request' + ' value, only "{}" or "{}" options allowed.'.format( + "field_type" if attr is False else "extension", + value_option[0], + value_option[1] + ) + ) + + check_field_membership(field_type, value_option=["all", "current"]) + validate_on_error(jql, (str, "jql", + "a string"), + "a string of a valid Jira query") + validate_on_error(encoding, (str, "encoding", + "a string"), + "a string of a character " + "encoding e.g utf-8") + validate_on_error(errors, (str, "errors", + "a string"), + "a string of a character encoding " + "exception 
e.g. replace") + validate_on_error(workers, (int, "workers", + "a number"), + "a number to indicate the worker process") + validate_on_error(is_sd_internal, (bool, "is_sd_internal", + "a boolean"), + "a boolean to indicate true or false") + validate_on_error(merge_files, (list, "merge_files", + "a list"), + "a list of file names which can be merged") + validate_on_error(csv_to_json, (str, "csv_to_json", + "a string"), + "a string of a Jira generated CSV file") + + validate_on_error(timeout, ((float, int), "timeout", + "a number as integer or " + "with a single decimal point"), + "a number to denote the timeout period") + + validate_on_error(json_properties, (list, "json_properties", + "a list of valid JSON properties" + " e.g. users, links or history"), + "a list of valid JSON property for export") + + allowed_props = ["users", "links", "history"] + for x_json_value in json_properties: + if x_json_value.lower() not in allowed_props: + raise JiraOneErrors( + "wrong", + f'Value "{x_json_value}" does not match the allowed options in' + " the ``json_properties`` argument" ) - elif isinstance(page, tuple): + + validate_on_error(date_format, (str, "date_format", + "a str of a date format" + " e.g. %m/%d/%y %I:%M %p" + " which translates to " + " MM/dd/yy h:mm AM"), + "a str of python's date format directive" + " or you can import some common ones from" + " the DateFormat class in jiraone.utils") + + validate_on_error(json_custom_type, (list, "json_custom_type", + "a list of Jira custom field" + " type e.g. com.atlassian.xxx"), + "a list of custom field type available in Jira") + + validate_on_error(is_cache, (bool, "is_cache", + "a boolean of the caching" + " mechanism"), + "a true or false value to the cache mechanism") + + validate_on_error(use_cache, (bool, "use_cache", + "a boolean of the caching" + " mechanism"), + "a true or false value to use the cache mechanism") + + validate_on_error(is_cache_filename, (str, "is_cache_filename", + "a string of the file" + " used for caching"), + "a string used to name the cache file") + + validate_on_error(expires, (int, "expires", + "an integer of the expiry" + " period required for caching"), + "an integer in seconds for the period of " + "caching time") + + validate_on_error(allow_media, (bool, "allow_media", + "an boolean to indicate " + " whether the user's auth should" + " be added to an attachment uri"), + "a boolean to indicate true or false to " + "allow a user's auth to media uri") + + validate_on_error(sub_tasks, (list, "sub_tasks", + "a list of Sub-task issue " + " type name to identify them"), + "a list of names denoting the issue types " + " available within an export") + + validate_on_error(project_type, (dict, "project_type", + "a dictionary of Jira's project " + " type template name"), + "a dict of project type template denoting " + " the name of the project type") + + validate_on_error(workflows, (dict, "workflows", + "a dictionary of Jira's project " + " workflow scheme names"), + "a dict of a workflow scheme name used in " + " Jira") + + validate_on_error(flush, ((float, int), "flush", + "a number to indicate delay " + " timeout period required for running" + " threads to shutdown"), + "a number to indicate what wait time is " + " required for running threads to shutdown") + + check_field_membership(extension, attr=True, + value_option=["csv", "json"]) + if page is None: + pass + elif page is not None: + validate_on_error(page, (tuple, "page", + "a tuple"), + "a tuple to determine valid page index") + if isinstance(page, tuple): 
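+            # Illustrative note: ``page`` expresses a range of 1000-issue
+            # pages, e.g. page=(0, 2) keeps pages 0, 1 and 2 of the export.
+            # The checks below only confirm that both entries are integers
+            # and that no more than two values were supplied; the range
+            # bounds themselves are asserted later, before download begins.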
fix_point = 0 for index_item in page: answer = "first" if fix_point == 0 else "second" @@ -2066,7 +2630,8 @@ def export_issues( "value" " should be an integer " "to loop page records. " - "Detected {} instead.".format(answer, type(index_item)), + "Detected {} instead.".format(answer, + type(index_item)), ) if fix_point > 1: add_log( @@ -2130,75 +2695,124 @@ def export_issues( active = True source_option["url"] = source LOGIN(**source_option) - rows, total, validate_query = 0, 0, LOGIN.get(endpoint.search_issues_jql(jql)) - if validate_query.status_code < 300: - total = validate_query.json()["total"] - else: - add_log( - "Invalid JQL query received. Reason {} with status code: " - "{} and addition info: {}".format( - validate_query.reason, - validate_query.status_code, - validate_query.json(), - ), - "debug", - ) - raise JiraOneErrors( - "value", - "Your JQL query seems to be invalid" " as no issues were returned.", - ) - calc = int(total / 1000) - # We assume each page is 1K that's downloaded. - limiter, init = total, rows - if page is not None: - assert page[0] > -1, ( - "The `page` argument first " - "range " - "value {}, is lesser than 0 " - "which is practically wrong.".format(page[0]) - ) - assert page[0] <= page[1], ( - "The `page` argument first " - "range " - "value, should be lesser than " - "the second range value of {}.".format(page[1]) - ) - assert page[1] <= calc, ( - "The `page` argument second " - "range " - "value {}, seems to have " - "exceed the issue record range " - "searched.".format(page[1]) - ) + rows, total, validate_query = 0, 0, LOGIN.get( + endpoint.search_issues_jql(jql) + ) + init, limiter = 0, 0 + if csv_to_json != "": + merge_files.append(csv_to_json) + + if not merge_files: + if validate_query.status_code < 300: + total = validate_query.json()["total"] + else: + add_log( + "Invalid JQL query received. Reason {} with status code: " + "{} and addition info: {}".format( + validate_query.reason, + validate_query.status_code, + validate_query.json(), + ), + "debug", + ) + raise JiraOneErrors( + "value", + "Your JQL query seems to be invalid" " as no issues were returned.", + ) + calc = int(total / 1000) + # We assume each page is 1K that's downloaded. + limiter, init = total, rows + if page is not None: + assert page[0] > -1, ( + "The `page` argument first " + "range " + "value {}, is lesser than 0 " + "which is practically wrong.".format(page[0]) + ) + assert page[0] <= page[1], ( + "The `page` argument first " + "range " + "value, should be lesser than " + "the second range value of {}.".format(page[1]) + ) + assert page[1] <= calc, ( + "The `page` argument second " + "range " + "value {}, seems to have " + "exceed the issue record range " + "searched.".format(page[1]) + ) + + limiter = (page[1] + 1) * 1000 + init = page[0] * 1000 + + if exclude_fields and include_fields: + raise JiraOneErrors("wrong", + "The ``exclude_fields`` and ``include_fields`` arguments " + "cannot be used at the same time.") - limiter = (page[1] + 1) * 1000 - init = page[0] * 1000 - print("Downloading issue export in CSV format.") + if extension.lower() == "json": + if exclude_fields: + raise JiraOneErrors( + "wrong", + 'You cannot use the JSON export function if Jira' + ' fields are being excluded. Please remove the ' + ' `exclude_fields` argument.' + ) + elif include_fields: + raise JiraOneErrors( + "wrong", + 'You cannot use the JSON export function if Jira' + ' fields are being included. Please remove the ' + ' `include_fields` argument.' 
+ ) + elif field_type.lower() != "all": + raise JiraOneErrors( + "wrong", + 'You cannot use the JSON export function, if Jira' + ' fields are not exported properly. Please use the "all" option ' + 'in the `field_type` argument or remove it completely.' + ) + print("Downloading issue export in {} format.".format( + extension.upper() + ) + ) file_deposit = [] - while True: - if init >= limiter: - break - file_name = temp_file.split(".")[0] + f"_{init}.csv" - issues = LOGIN.get(endpoint.issue_export(jql, init)) - print( - issues, - issues.reason, - f"::downloading issues at page: " f"{int(init / 1000)}", - "of {}".format(int((limiter - 1) / 1000)), - ) - file_writer( - folder, - file_name, - content=issues.content.decode(encoding, errors=errors), - mark="file", - mode="w+", - ) - # create a direct link to the new file - # ensure that there's a unique list as the names are different. - if file_name not in file_deposit: - file_deposit.append(file_name) - config.update({"exports": file_deposit}) - init += 1000 + + def download_csv() -> None: + """Generate a CSV file from JQL""" + nonlocal init + while True: + if init >= limiter: + break + file_name = temp_file.split(".")[0] + f"_{init}.csv" + issues = (LOGIN.get(endpoint.issue_export(jql, init)) + if field_type.lower() == "all" else + LOGIN.get(endpoint.issue_export( + jql, init, fields="current" + ) + )) + print( + issues, + issues.reason, + f"::downloading issues at page: " f"{int(init / 1000)}", + "of {}".format(int((limiter - 1) / 1000)), + ) + file_writer( + folder, + file_name, + content=issues.content.decode(encoding, errors=errors), + mark="file", + mode="w+", + ) + # create a direct link to the new file + # ensure that there's a unique list as the names are different. + if file_name not in file_deposit: + file_deposit.append(file_name) + config.update({"exports": file_deposit}) + init += 1000 + + download_csv() if not merge_files else config.update({"exports": merge_files}) ( config["prev_list"], @@ -2208,20 +2822,66 @@ def export_issues( config["set_file"], ) = ([], [], [], [], []) + config["is_valid"] = False + # Get an index of all columns within the first file # Then use it across other files in the list - def data_frame(files_=None, activate: bool = True, poll=None, **kwargs) -> None: - """Check each column width of each CSV file - modify it and add the new rows. 
+ def parse_media(uri: str) -> str: + """ + Parse a URL string to include the + credential of the URI - :param files_: A name to files + :param uri: An attachment URI + :return: str + """ + if uri.startswith("http") or uri.startswith("https"): + rem_http = uri.split("://") + user_id = LOGIN.user.split("@") + auth_uri = f"{rem_http[0]}://{user_id[0]}%40{user_id[1]}:" \ + f"{LOGIN.password}@{rem_http[-1]}" + return auth_uri + + def get_pkey_index(pkey: list, key: str, + key_search: list, + attr: bool = False) -> int: + """ + Return the index of the column key - :param activate: A validator + :param pkey: A list of dict values + :param key : A key name to search + :param key_search: An object search name + :param attr: Change context of operations + :return: int + """ - :param poll: A poll data link + for item in pkey: + if attr is True: + if item.get("column_name") == key: + config[key_search[0]][key_search[1]].append( + item.get("column_index") + ) + else: + if item.get("column_name") == key: + config[key_search[0]][key_search[1]] = item.get("column_index") + config["is_valid"] = True + return config[key_search[0]][key_search[1]] + return config[key_search[0]][key_search[1]] + + def data_frame(files_: str = None, + activate: bool = True, + poll: list = None, **kwargs) -> None: + """Check each column width of each CSV file, + get the name of the column and index number + which can be called with `config["headers"]`. + + :param files_: A name to files + + :param activate: A validator + + :param poll: A poll data link, usually a list of items - :param kwargs: Additional arguments + :param kwargs: Additional arguments which can be supplied :return: None """ @@ -2244,7 +2904,7 @@ def data_frame(files_=None, activate: bool = True, poll=None, **kwargs) -> None: # build headers column_headers, headers, max_col_length = [], {}, 0 - def write_files(files_, push: list = None) -> None: + def write_files(files_: str, push: list = None) -> None: """Creates the header file. :param files_: The name to the file @@ -2258,7 +2918,13 @@ def write_files(files_, push: list = None) -> None: # create a temp csv file with the header of the export def create_file(**kwargs) -> None: - """Create multiple files or data points.""" + """ + Create multiple files or data points. + + :param kwargs: Additional supplied arguments + :return: None + """ + nonlocal max_col_length, headers, column_headers data_frame(**kwargs) if push is not None else data_frame(files_) column_headers, headers, max_col_length = ( @@ -2270,7 +2936,7 @@ def create_file(**kwargs) -> None: column_headers.append(header.column_name) max_col_length += 1 - def make_file(modes, data) -> None: + def make_file(modes: str, data: list) -> None: """Writes a list into a File<->like object. :param modes: A writing mode indicator @@ -2295,7 +2961,7 @@ def make_headers_mark() -> None: config["prev_list"] = column_headers def column_check( - first_list: list, second_list: list, _count_: int = 0 + first_list: list, second_list: list, _count_: int = 0 ) -> None: """Determine and defines the column headers. @@ -2309,7 +2975,7 @@ def column_check( """ def column_populate( - name_of_field: str = None, ticker: int = None + name_of_field: str = None, ticker: int = None ) -> None: """Receives a column count list with index. 
which are inserted and arranged properly @@ -2346,7 +3012,7 @@ def determine_value(value: str) -> int: else: _plus_value = prior_occurrence _main_value = ( - int(abs(next_occurrence - prior_occurrence)) + _plus_value + int(abs(next_occurrence - prior_occurrence)) + _plus_value ) return _main_value @@ -2406,7 +3072,7 @@ def populate_column_data(column_data: list, attr: bool = False) -> list: config["prev_list"] if attr is False else config["next_list"] ) - def load_count(my_list, conf) -> list: + def load_count(my_list: list, conf: list) -> list: """Loads a list into a dictionary of values arranged by their column name. @@ -2462,7 +3128,11 @@ def load_count(my_list, conf) -> list: keep_track = set() # used to keep track of multiple column def bind_us() -> list: - """This helps to align the rows to columns""" + """This helps to align the rows to columns + + :return: List + """ + # where the values are for new_row in other_value: # where we need the values to be @@ -2470,15 +3140,15 @@ def bind_us() -> list: for this_row in my_value: if check_name == 1: if ( - this_row["column_name"] - == new_row["column_name"] + this_row["column_name"] + == new_row["column_name"] ): this_row["column_data"] = new_row["column_data"] break elif check_name > 1: if ( - this_row["column_name"] - == new_row["column_name"] + this_row["column_name"] + == new_row["column_name"] ): if this_row["column_index"] not in keep_track: keep_track.add(this_row["column_index"]) @@ -2556,7 +3226,7 @@ def data_provision(make_item: list, attr: bool = False) -> None: [row for row in make_item] ) # The below adds the values as they are gotten - # From a dictionary object + # from a dictionary object while True: if stop_loop >= finish_loop: break @@ -2611,7 +3281,10 @@ def data_provision(make_item: list, attr: bool = False) -> None: length = max_col_length # keep track of the column width def merge_files() -> None: - """Merge each files and populate it into one file.""" + """Merge each files and populate it into one file. + + :return: None + """ iteration, progress = 0, 0 for files in file_path_directory: current_value = len(file_path_directory) @@ -2628,7 +3301,9 @@ def merge_files() -> None: # write the headers only write_files(files_=files) # add the content into the temp file and populate the rows - file_writer(folder, temp_file, data=payload, mark="many", mode="a+") + file_writer( + folder, temp_file, data=payload, mark="many", mode="a+" + ) else: # process the next list of files here write_files(files_=files, push=payload) @@ -2637,7 +3312,9 @@ def merge_files() -> None: progress += 1 current_progress = 100 * progress / current_value print( - "Processing. " "Current progress: {}%".format(int(current_progress)) + "Processing. " "Current progress: {}%".format( + int(current_progress) + ) ) merge_files() # loop through each file and attempt combination @@ -2666,14 +3343,23 @@ def merge_files() -> None: column_headers.append(header.column_name) max_col_length += 1 - def populate(name) -> None: + def populate(name: str) -> None: + """Creates a field name column index + :param name: A field name + :return: None + """ for _id_ in headers.value: if _id_.column_name == name: field_column.append(_id_.column_index) def reset_fields() -> None: - """Reset field values.""" - nonlocal read_file, start, copy_total, total_, field_list, field_data, cycle, field_column + """Reset field values. 
+ + :return: None + """ + + nonlocal read_file, start, copy_total, total_, field_list, \ + field_data, cycle, field_column read_file, start = file_reader(folder, temp_file, **kwargs), 0 copy_total = deepcopy(read_file) @@ -2682,14 +3368,23 @@ def reset_fields() -> None: field_list, config["fields"], config["saves"] = [], [], [] field_data, cycle, field_column = set(), 0, [] - def check_id(_id, _iter) -> bool: - """Return True if item exist in list.""" + def check_id(_id: int, _iter: list) -> bool: + """Return true if item exist in list. + :param _id: An id in a list + :param _iter: An iterable data + + :return: bool + """ if _id in _iter: return True return False - def check_payload(data) -> Union[dict, list]: - """Return the value of a field.""" + def check_payload(data: Union[dict, list]) -> Union[dict, list]: + """Return the value of a field. + :param data: A data field + + :return: dict or list + """ if isinstance(data, list): # used for sprint fields _data = data[0] @@ -2720,17 +3415,28 @@ def check_payload(data) -> Union[dict, list]: } return _result_ - def get_watchers(name, key): + def get_watchers(name: str, key: Union[str, int]) -> list: + """ + Return a list of watchers + :param name: A watcher field + :param key: An issue key + :return: dict + """ get_issue = field.get_field_value(name, key) get_watch = LOGIN.get(get_issue["self"]).json() return get_watch - def field_change(): - """Recursively check field columns and rewrite values.""" + def field_change() -> None: + """Recursively check field columns and rewrite values. + :return: None + """ nonlocal field_list, start, cycle for columns_ in read_file: - def check_field(): + def check_field() -> None: + """Check field values + :return: None + """ print( "Converting {} name to {} id on outfile from {}".format( names, names, LOGIN.base_url @@ -2761,29 +3467,35 @@ def check_field(): "doesn't exist here".format(names, _field_item) ) - def check_columns(_max_length): + def check_columns(_max_length: int) -> None: + """ + Determines the columns of the CSV file + + :param _max_length: Max length of issue column + :return: None + """ for rows_ in columns_: if _max_length == length: break if start > 0: if ( - check_id(_max_length, field_column) - and rows_ != "" - and cycle == 0 + check_id(_max_length, field_column) + and rows_ != "" + and cycle == 0 ): values = columns_[_max_length] field_data.add(values) if ( - check_id(_max_length, field_column) - and rows_ != "" - and cycle == 1 + check_id(_max_length, field_column) + and rows_ != "" + and cycle == 1 ): get_value = ( [ name.get("field_id") for name in config["fields"] if name.get("field_name") - == columns_[_max_length] + == columns_[_max_length] ] if names != "Watchers" else [ @@ -2791,7 +3503,7 @@ def check_columns(_max_length): for data_field in config["fields"] for name in data_field if name.get("field_name") - == columns_[_max_length] + == columns_[_max_length] ] ) if len(get_value) != 0: @@ -2827,11 +3539,1407 @@ def check_columns(_max_length): except AttributeError as error: sys.stderr.write(f"{error}") - shutil.copy(path_builder(folder, temp_file), path_builder(folder, final_file)) - os.remove(path_builder(folder, temp_file)) - for file in config["exports"]: - path = path_builder(folder, file) - os.remove(path) + def caching(name_field: str, + obj_type: Union[dict, list]) -> None: + """ + Creates a caching check by depositing the time, + instance and jql query that was used during the + last check along with the object data. 
+ Works specifically for JSON export + + :param name_field: The name used to save the cache + :param obj_type: An object that is saved + + :return: None + """ + if os.path.isdir(folder): + file_path = path_builder(folder, is_cache_filename) + data_dump = {name_field: {}} + if os.path.isfile(file_path): + read_json = json.load(open( + file_path, encoding=encoding + )) + if read_json: + data_dump.update(read_json) + data_dump[name_field].update({ + "name": LOGIN.base_url, + "jql": jql, + "value": obj_type, + "time": datetime.strftime( + datetime.astimezone( + datetime.now() + timedelta(seconds=expires) + ), df.YYYY_MM_dd_HH_MM_SS_MS) + }) + json.dump(data_dump, open( + file_path, mode="w+", encoding=encoding), + indent=4, sort_keys=True) + else: + descriptor = os.open(file_path, flags=os.O_CREAT) + os.close(descriptor) + _data_ = {} + json.dump(_data_, open(file_path, mode="w+")) + + def is_file_exist(cache_file: str = None) -> bool: + """ + Checks and updates the object from cache based + on the name it was saved. If found updates + the object and returns true. If not found, returns + false and does nothing. + + :param cache_file: A string of the name used to cache + the object. + + :return: bool + """ + + if os.path.isdir(folder): + file_path = path_builder(folder, is_cache_filename) + if os.path.isfile(file_path): + load_file = json.load(open( + file_path, encoding=encoding)) + if cache_file in load_file: + get_cache_name = load_file[cache_file] + current_time = datetime.today() + parse_time = datetime.strptime( + get_cache_name["time"], df.YYYY_MM_dd_HH_MM_SS_MS + ) + if get_cache_name["name"] == LOGIN.base_url and \ + get_cache_name["jql"] == jql and \ + current_time < parse_time: + if cache_file == "sprint": + config["sprint_data"].update( + get_cache_name["value"] + ) + return True + elif cache_file == "sprint_container": + config["sprint_object_container"].update( + get_cache_name["value"] + ) + return True + elif cache_file == "custom_fields": + config["headers"] = get_cache_name["value"] + return True + elif cache_file == "users": + for _items_ in get_cache_name["value"]: + config["json_userlist"].append(_items_) + return True + return False + + # Verify each field exist in Jira + # Then rewrite the name to be suitable in JSON format + def float_fields(field_names: dict, + regex_pattern: str) -> None: + """ + Submit some Jira field data for a search + and get the field's properties. 
+ + :param field_names: A name to a Jira field + :param regex_pattern: A regex pattern for custom + fields + + :return: None + """ + field_copy = deepcopy(field_names["column_name"]) + config["headers"][field_names["column_index"]][ + "field_column_name"] = field_copy + if field_names["column_name"].startswith("Custom field"): + search_name = re.compile(regex_pattern, re.I) + find_name = search_name.search( + field_names["column_name"] + ) + name_found = None + if find_name is not None: + name_found = find_name.group(2) + + if name_found is not None: + jira_field_name = name_found.lstrip("(").rstrip(")") + map_name = field.get_field( + jira_field_name + ) + + if map_name is None: + pass + else: + config["headers"][field_names["column_index"]][ + "column_name"] = map_name.get("id") + config["headers"][field_names["column_index"]][ + "original_name"] = map_name.get("name") + config["headers"][field_names["column_index"]][ + "customType"] = map_name.get("customType") + + else: + map_name = field.get_field( + field_names["column_name"] + ) + if map_name is None: + map_name = {} + + if "system" in map_name: + config["headers"][field_names["column_index"]][ + "column_name"] = map_name.get("key") + config["headers"][field_names["column_index"]][ + "original_name"] = map_name.get("name") + config["headers"][field_names["column_index"]][ + "customType"] = map_name.get("custom") + + def fetch_field_ids(header_names: list) -> None: + """ + Mutate the field names of Jira to JSON compatible names + + :param header_names: A list of Jira field names + :return: None + """ + + for our_field_name in header_names: + process_executor(float_fields, data=our_field_name, + workers=workers, timeout=timeout, + regex_pattern=CUSTOM_FIELD_REGEX) + + if is_cache is True: + caching("custom_fields", config["headers"]) + + def field_exclude() -> None: + """ + Exclude certain field column from the Jira + CSV export and it's data prior to export + completion + + :return: None + """ + data_frame(files_=temp_file) + exclude_read = file_reader(folder, + temp_file, + skip=True) + file_headers = [] + print("Validating Jira field names") + fetch_field_ids(config["headers"]) + exclude_list, first_run = [], False + for exclude_column in exclude_read: + exclude_data = [] + for ex_header, ex_row in zip(config["headers"], + exclude_column): + if ex_header.get("original_name") not in exclude_fields: + exclude_data.append(ex_row) + if first_run is False: + file_headers.append( + ex_header.get("field_column_name")) + first_run = True + exclude_list.append(exclude_data) + + print("Excluding declared field columns into the CSV file") + file_writer(folder, temp_file, data=[file_headers], + mark="many", mode="w+") + print("Reconstructing file headers") + file_writer(folder, temp_file, data=exclude_list, + mark="many") + + def field_include() -> None: + """ + Include certain field column from the Jira + CSV export and it's data prior to export + completion + + :return: None + """ + data_frame(files_=temp_file) + include_read = file_reader(folder, + temp_file, + skip=True) + file_headers = [] + print("Validating Jira field names") + fetch_field_ids(config["headers"]) + include_list, first_run = [], False + for include_column in include_read: + include_data = [] + for in_header, in_row in zip(config["headers"], + include_column): + if in_header.get("original_name") in include_fields: + include_data.append(in_row) + if first_run is False: + file_headers.append( + in_header.get("field_column_name") + ) + first_run = True + 
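+                # include_data now holds only the row values whose columns
+                # were requested via ``include_fields``; the reduced rows
+                # collected below are written back over the temporary CSV file.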
include_list.append(include_data) + + print("Including declared field columns into the CSV file") + file_writer(folder, temp_file, data=[file_headers], + mark="many", mode="w+") + print("Reconstructing file headers") + file_writer(folder, temp_file, data=include_list, + mark="many") + + def extend_format(ext: str = None) -> None: + """ + Differentiate between the format file to render + + :param ext: A format type to render + :return: None + """ + nonlocal final_file + + def extend_file_type() -> None: + """ + Determines the file name when ``final_file`` argument is + used. + + :return: None + """ + nonlocal final_file + if ext.lower() == "csv": + if not final_file.endswith(".csv"): + final_file = final_file + ".csv" + shutil.copy(path_builder(folder, temp_file), + path_builder(folder, final_file)) + elif ext.lower() == "json": + if not final_file.endswith(".json"): + final_file = final_file + ".json" + json.dump(config["json_build"], + open(path_builder(folder, final_file), + mode="w+", encoding=encoding), + indent=4, sort_keys=True) + os.remove(path_builder(folder, temp_file)) + + def user_extraction() -> None: + """ + Extracts and retains Jira users + and groups lookup until program finish executing + + :return: list + """ + + def export_users(_export_data_: dict) -> None: + """ + Perform an export of users + :param _export_data_: An iterable item of data + :return: None + """ + nonlocal _start + + def export_groups(user_account: str) -> list: + """ + Exports a list of group a user is in. + :param user_account: + :return: + """ + _group_holder_ = [] + _user_group_ = LOGIN.get(endpoint.get_user_group(user_account)) + if _user_group_.status_code < 300: + _group_data_ = _user_group_.json() + for user_group in _group_data_: + _group_holder_.append(user_group.get("name")) + + return _group_holder_ + + _data_ = { + "display_name": _export_data_["displayName"], + "account_id": _export_data_["accountId"] if LOGIN.api is True else + _export_data_["name"], + "active": _export_data_["active"], + "account_type": _export_data_.get("accountType"), + "groups": export_groups(_export_data_["accountId"]), + "email": _export_data_.get("emailAddress") + } + config["json_userlist"].append( + _data_ + ) + + _start, _max_result_ = 0, 1000 + + while True: + user_export = LOGIN.get(endpoint.search_users( + _start, _max_result_) + ) + if user_export.status_code < 300: + _user_data_ = user_export.json() + for _user_item_ in _user_data_: + process_executor( + export_users, + data=_user_item_, + workers=workers, + timeout=timeout + ) + if not _user_data_: + break + _start += _max_result_ + + if is_cache is True: + caching("users", config["json_userlist"]) + + def link_issue_extraction(linked_object: dict, + regex_pattern_in: str, + regex_pattern_out: str) -> None: + """ + Extracts the linked issues according to + issue key data in the export + + :param linked_object: A dict of the column header + :param regex_pattern_in: A regular expression pattern + for issue links inwards + :param regex_pattern_out: A regular expression pattern + for issue links outwards + + :return: None + """ + if linked_object["column_name"].startswith("Inward issue link"): + search_name = re.compile(regex_pattern_in, re.I) + find_name = search_name.search( + linked_object["column_name"] + ) + name_found = None + if find_name is not None: + name_found = find_name.group(2) + + if name_found is not None: + jira_field_name = name_found.lstrip("(").rstrip(")") + map_name = jira_field_name + + config["headers"][linked_object["column_index"]][ + 
"linked_name"] = map_name + elif linked_object["column_name"].startswith("Outward issue link"): + search_name = re.compile(regex_pattern_out, re.I) + find_name = search_name.search( + linked_object["column_name"] + ) + name_found = None + if find_name is not None: + name_found = find_name.group(2) + + if name_found is not None: + jira_field_name = name_found.lstrip("(").rstrip(")") + map_name = jira_field_name + + config["headers"][linked_object["column_index"]][ + "linked_name"] = map_name + + def json_field_builder() -> None: + """Builds a JSON representation of fields in Jira + + :return: None + """ + # A blueprint of a JSON template + json_project_template = {"projects": []} + json_linked_issues_template = {"links": []} + json_user_template = {"users": []} + json_history_template = {"history": []} + # declare the base template of the Jira JSON structure + config["json_build"], config["save_point"], \ + config["sprint_data"], config["issuekey_data"] = {}, {}, {}, {} + (config["projecttype_data"], config["projecturl_data"], + config["projectdescription_data"], + config["projectlead_data"], + config["projectname_data"]) = ({}, {}, {}, {}, {}) + config["sprint_data"]["col_name_index"] = [] + # Start the first project save point + config["save_point"]["col_name_index"] = 0 + config["issuekey_data"]["col_name_index"] = 0 + config["projecttype_data"]["col_name_index"] = 0 + config["projectdescription_data"]["col_name_index"] = 0 + config["projecturl_data"]["col_name_index"] = 0 + config["projectlead_data"]["col_name_index"] = 0 + config["projectname_data"]["col_name_index"] = 0 + (config["sprint_object_container"], + config["json_userlist"]) = ({}, []) + data_frame(files_=temp_file) + + try: + get_pkey_index(config["headers"], "Project key", + ["save_point", "col_name_index"]) + assert config["is_valid"] is True, "Unable to find required field " \ + "in header column e.g. 
Project key" + except AssertionError as err: + os.remove(path_builder(folder, temp_file)) + add_log( + f"{err} on line {err.__traceback__.tb_lineno}" + f"{sys.__excepthook__(Exception, err, err.__traceback__)}", + "error") + exit(err) + + print("Converting Jira custom field names to Jira JSON compatible names.") + + try: + + if use_cache is True: + if is_file_exist("custom_fields"): + print("Using cached data of custom fields") + else: + fetch_field_ids(config["headers"]) + else: + fetch_field_ids(config["headers"]) + except (KeyError, AttributeError, ValueError, IndexError, + TypeError) as err: + os.remove(path_builder(folder, temp_file)) + add_log( + f"{err} on line {err.__traceback__.tb_lineno} " + f"{sys.__excepthook__(Exception, err, err.__traceback__)}", + "error") + exit(f"An error has occurred: {err}") + + # caching sprints + if use_cache is True: + if is_file_exist("sprint"): + print("Using cached data for sprint values") + else: + get_pkey_index(config["headers"], "Sprint", + ["sprint_data", "col_name_index"], + attr=True) + if is_cache is True: + caching("sprint", config["sprint_data"]) + else: + get_pkey_index(config["headers"], "Sprint", + ["sprint_data", "col_name_index"], + attr=True) + if is_cache is True: + caching("sprint", config["sprint_data"]) + + get_pkey_index(config["headers"], "Issue key", + ["issuekey_data", "col_name_index"]) + get_pkey_index(config["headers"], "Project type", + ["projecttype_data", "col_name_index"]) + get_pkey_index(config["headers"], "Project description", + ["projectdescription_data", "col_name_index"]) + get_pkey_index(config["headers"], "Project url", + ["projecturl_data", "col_name_index"]) + get_pkey_index(config["headers"], "Project lead id" if + LOGIN.api is True else "Project lead", + ["projectlead_data", "col_name_index"]) + get_pkey_index(config["headers"], "Project name", + ["projectname_data", "col_name_index"]) + read_csv_file = file_reader(folder, temp_file, skip=True) + # generate user list + print("Verifying users and group membership") if use_cache is False \ + else print("Looking up users and group from cache") + if use_cache is True: + if is_file_exist("users"): + print("Using cache to verify users and groups") + else: + print("Cached expired, defaulting to live search.") + user_extraction() + else: + user_extraction() + + if "links" in [x.lower() for x in json_properties]: + print("Verifying linked issues from issuelink types") + for links in config["headers"]: + link_issue_extraction(links, + INWARD_ISSUE_LINK, + OUTWARD_ISSUE_LINK) + + print("Verifying Sprint values.") + get_sprint_obj = deepcopy(read_csv_file) + sprint_custom_id, sprint_cf = field.search_field("Sprint"), None + for sprint_item in get_sprint_obj: + for sub_sprint in config["sprint_data"]["col_name_index"]: + if sprint_item[sub_sprint]: + config["sprint_object_container"].update({ + f"{sprint_item[sub_sprint]}": [] + + }) + + if "customType" in sprint_custom_id: + if sprint_custom_id["customType"].endswith("gh-sprint"): + extract = sprint_custom_id["id"].split("_") + sprint_cf = f"cf[{extract[1]}]" + + def name_to_user_id(user_value: str) -> dict: + """ + Returns an account_id or userid of a User object + + :param user_value: Convert a display name to acceptable + username or accountId + + :return: dict + """ + + _user_value_list, _profile_data_ = [], None + for _user_names_ in config["json_userlist"]: + if user_value == _user_names_["display_name"]: + _profile_data_ = { + "account_id": _user_names_["account_id"], + "display_name": 
_user_names_["display_name"], + "active": _user_names_["active"], + "groups": _user_names_["groups"], + "email": _user_names_.get("email"), + } + _user_value_list.append(_profile_data_) + + if not _user_value_list: + return {"account_id": None} + elif len(_user_value_list) == 1: + return _profile_data_ + elif len(_user_value_list) > 1: + # Since we're finding these users by display name + # if multiple users with the same name exist, we want to + # take a calculated guess + guess = random.choices( + _user_value_list, [float(_user_value_list.index( + each_user) + 0.5) for each_user in _user_value_list]) + return guess[0] + + def search_sprints(sprint_value: str) -> None: + """ + Search for sprint id from Jira issues + + :param sprint_value: A Sprint value + + :return: None + """ + _search_ = LOGIN.get( + endpoint.search_issues_jql(f'{sprint_cf} = "{sprint_value}"') + ) + if _search_.status_code < 300: + _search_results_ = _search_.json()["issues"] + for keys in _search_results_: + if "key" in keys: + key_ = keys["key"] + _search_issue = LOGIN.get( + endpoint.issues(key_) + ) + if _search_issue.status_code < 300: + _issue_results_ = _search_issue.json()["fields"] + sprint_field = _issue_results_[sprint_custom_id["id"]] + if sprint_field: + for sprint_item_ in sprint_field: + if sprint_item_.get("name") in config["sprint_object_container"]: + sprint_data = { + "name": sprint_item_.get("name"), + "state": sprint_item_.get("state"), + "startDate": sprint_item_.get("startDate"), + "endDate": sprint_item_.get("endDate"), + "completeDate": sprint_item_.get("completeDate"), + "rapidViewId": sprint_item_.get("boardId") + } + if sprint_item_.get("name") not in config[ + "sprint_object_container"][ + sprint_item_.get("name") + ]: + config["sprint_object_container"][sprint_item_.get( + "name")].append( + sprint_data + ) + break + + if is_cache is True: + caching("sprint_container", config["sprint_object_container"]) + + print("Extracting Sprint Ids from values.") if use_cache is False \ + else print("Looking up Sprint values from cache.") + if use_cache is True: + if is_file_exist("sprint_container"): + print("Using Sprint Ids from cached values") + else: + print("Cached expired, defaulting to live search.") + for sprint_key, sprint_val in config["sprint_object_container"].items(): + search_sprints(sprint_key) + process_executor(search_sprints, + data=sprint_key, + workers=2, timeout=timeout) + + else: + for sprint_key, sprint_val in config["sprint_object_container"].items(): + search_sprints(sprint_key) + process_executor(search_sprints, + data=sprint_key, + workers=2, timeout=timeout) + + project_settings = {} + project_config = {} + + def field_builder(bundle: list = None, + col_name: list = None, + ) -> None: + """Takes a bundle which is a list and extracts values + + :param bundle: A data list to process + :param col_name: A list of the column names + + :return: None + """ + nonlocal my_index, do_once + + def start_process() -> None: + """ + Initiates the JSON conversion process + + :return: None + """ + nonlocal my_index, do_once + json_customfield_template = {"customFieldValues": []} + json_customfield_sub_template = {"value": []} + json_attachment_template = {"attachments": []} + json_comment_template = {"comments": []} + json_worklog_template = {"worklogs": []} + json_labels_template = {"labels": []} + json_watchers_template = {"watchers": []} + json_component_template = {"components": []} + json_fixversion_template = {"fixedVersions": []} + json_affectversion_template = {"affectedVersions": 
[]} + data, issue_data, issue_temp = {}, {}, {"issues": []} + sprint_issue_data, cf_multi_field = [], {} + + def parse_sla_fields(time_value: str) -> Union[str, None]: + """ + Convert the datetime string into compatible + d/MMM/yy + + :param time_value: A string of datetime data + :return: str + """ + time_val = None + if time_value is None or time_value == "": + pass + else: + time_val = datetime.strptime( + time_value, df.YYYY_MM_dd_HH_MM_SS_MS + ) + return time_val.strftime(df.dd_MMM_yy) + return time_val + + def parse_duration(time_value: int = 0) -> str: + """ + Parse time value into ISO_8601 durations + source: https://en.wikipedia.org/wiki/ISO_8601#Durations + Using seconds for every tiem value. + + :param time_value: A time estimate value + :return: str + """ + time_val = "PT0S" + if time_value is None or time_value == "": + pass + else: + time_val = f"PT{time_value}S" + return time_val + + def parse_dates(date_value: str, + date_pattern: str = date_format, + index_level: int = 0, + end_format: str = None + ) -> Union[str, None]: + """ + Parse date format into Jira JSON acceptable format + + :param date_value: A date string value + :param date_pattern: A format for datetime + :param index_level: Level of index + :param end_format: An end format for the date field + + :return: datetime string value + """ + + new_date = None + if date_value == "" or date_value is None: + pass + else: + try: + new_date = datetime.strptime(date_value, date_pattern) + return new_date.strftime( + df.YYYY_MM_dd_HH_MM_SS_MS_TZ if end_format is None \ + else end_format + ) + except ValueError: + guess_format = [ + df.dd_MMM_yy_hh_MM_AM_PM, + df.dd_MM_yy_hh_MM_AM_PM, + df.dd_MMM_YYYY_hh_MM_SS_AM_PM, + df.YYYY_MM_dd_T_HH_MM_SS_MS, + df.MM_dd_yy_space_hh_MM_AM_PM, + df.dd_MM_YYYY_space_hh_MM_AM_PM, + df.MM_dd_yy_hh_MM_AM_PM, + df.MMM_dd_YYYY_hh_MM_SS_AM_PM, + df.YYYY_MM_dd_hh_MM_SS_AM_PM, + df.dd_MM_YYYY_hh_MM_SS_AM_PM, + df.MM_dd_YYYY_hh_MM_AM_PM, + "Invalid format" + ] + limit = len(guess_format) + count = index_level + current_value = date_value + for _pattern_ in guess_format[index_level:]: + count += 1 + if count >= limit: + raise JiraOneErrors( + "wrong", + "Unable to determine date_format for" + f" value {current_value}" + ) + parse_dates(current_value, _pattern_, count) + + return new_date + + for obj_value, obj_name in zip(bundle, col_name): + # Establish an updated reference of the project list + # and their object configurations above + + def check_customtype(custom: list, + type_obj: dict) -> bool: + """ + Return true or false for valid custom type + + :param custom: a list of custom types + :param type_obj: An object type + :return: bool + """ + for custom_type in custom: + if type_obj.get("customType").startswith(custom_type): + return True + return False + + def produce_custom_type(type_object: dict, + type_value: Any) -> dict: + """" + Determine object type for custom field based + on custom field type + :param type_object: A dict representation of + the custom field + :param type_value: A value of the custom field + type + :return: dict + """ + _data_ = None + + def multi_drop_data(type_obj: dict) -> dict: + """ + Mutate data to avoid object duplication + :param type_obj: An object data + + :return: dict + """ + _cf_data_ = { + "fieldName": type_obj.get("original_name"), + "fieldType": type_obj.get("customType"), + "value": cf_multi_field.get( + type_obj.get("original_name")).get("value") + } + return _cf_data_ + + def multi_drop_check(_value_data_: Any, + type_obj: dict) -> None: + """Mutates 
data into multiple list value + :param _value_data_: A data value + :param type_obj: An object data + :return: None + """ + for _items_ in json_customfield_template["customFieldValues" + ]: + if _items_.get("fieldName") == type_obj.get( + "original_name"): + if _value_data_ is None or _value_data_ == "": + pass + else: + _items_.get("value").append( + _value_data_ if not type_obj.get( + "customType").endswith( + "multiuserpicker" + ) else name_to_user_id( + _value_data_ + ).get("account_id") + ) + + if type_object.get("customType").endswith("multicheckboxes") \ + or type_object.get("customType").endswith("multiuserpicker") \ + or type_object.get("customType").endswith("labels"): + + if type_object.get("original_name") not in [ + d.get("fieldName") for d in json_customfield_template[ + "customFieldValues"]]: + + if not hasattr(cf_multi_field, type_object.get("original_name")): + if type_value is None or type_value == "": + cf_multi_field.update( + {type_object.get("original_name"): { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": [] + } + } + ) + else: + cf_multi_field.update( + {type_object.get("original_name"): { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": [type_value] if not type_object.get( + "customType").endswith( + "multiuserpicker") else [ + name_to_user_id( + type_value + ).get("account_id")] + } + } + ) + _data_ = multi_drop_data(type_object) + + else: + if type_value is None or type_value == "": + multi_drop_check(type_value, type_object) + else: + multi_drop_check(type_value, type_object) + + elif type_object.get("customType").endswith("datetime"): + _data_ = { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": parse_dates(type_value, df.dd_MMM_yy_hh_MM_AM_PM, + end_format=df.dd_MMM_yy_hh_MM_AM_PM) + } + elif type_object.get("customType").endswith("userpicker"): + _data_ = { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": name_to_user_id(type_value).get("account_id") + } + elif type_object.get("customType").endswith("firstresponsedate"): + _data_ = { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": parse_sla_fields(type_value) + } + elif type_object.get("customType").endswith("cascadingselect"): + cascade = type_value.split("->") + cascade_obj = {} + if len(cascade) > 1: + cascade_obj.update({"": cascade[0].strip(" "), + "1": cascade[1].strip(" ")}) + sub_cf_copy = deepcopy(cascade_obj) + _data_ = { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": sub_cf_copy + } + else: + _data_ = { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": type_value + } + else: + if check_customtype(json_custom_type, type_object): + pass + else: + _data_ = { + "fieldName": type_object.get("original_name"), + "fieldType": type_object.get("customType"), + "value": type_value + } + + return _data_ + + def sprint_extract(type_value: Any) -> None: + """ + Extracts a sprint object + :param type_value: A value of an object + + :return: None + """ + + if type_value in config["sprint_object_container"]: + if config["sprint_object_container"][type_value]: + prep_obj = config["sprint_object_container"][type_value][-1] + sprint_issue_data.append(prep_obj) + + # Keep track of each project key and their objects + if 
bundle[config["save_point"]["col_name_index"]] \ + not in project_settings: + my_index += 1 + + my_bundle = {bundle[config["save_point"]["col_name_index"]]: + bundle[config["save_point"]["col_name_index"]]} + my_bundle_index = { + bundle[config["save_point"]["col_name_index"]]: my_index} + project_name = bundle[config["projectname_data"]["col_name_index"]] if \ + config["projectname_data"]["col_name_index"] > 0 else None + project_settings.update(my_bundle) + project_config.update(my_bundle_index) + cf_versions, cf_components = {"versions": []}, {"components": []} + _project_keys_ = bundle[config["save_point"]["col_name_index"]] + get_versions = LOGIN.get(endpoint.get_project_versions( + id_or_key=_project_keys_) + ) + get_components = LOGIN.get(endpoint.get_project_component( + id_or_key=_project_keys_) + ) + if get_versions.status_code < 300: + for version in get_versions.json(): + _data = { + "name": version.get("name", ""), + "released": version.get("released", ""), + "releaseDate": version.get("releaseDate", "") + } + cf_versions["versions"].append(_data) + if get_components.status_code < 300: + for component in get_components.json(): + lead = None + if "lead" in component: + lead = component.get("lead").get("accountId") + _data = { + "name": component.get("name", ""), + "description": component.get("description", "") + } if lead is None else { + "name": component.get("name", ""), + "description": component.get("description", ""), + "lead": lead + } + cf_components["components"].append(_data) + + issue_temp.update({"key": bundle[ + config["save_point"]["col_name_index"]]}) + issue_temp.update({"name": project_name}) + issue_temp.update(cf_versions) + issue_temp.update(cf_components) + issue_temp.update({"type": bundle[ + config["projecttype_data"]["col_name_index"]] if + config["projecttype_data"]["col_name_index"] > 0 else None}) + issue_temp.update( + {"template": project_type.get("software") if issue_temp["type"] == "software" \ + else project_type.get("service_desk") if issue_temp["type"] == \ + "service_desk" else project_type.get( + "business")} + ) + issue_temp.update( + {"url": bundle[config["projecturl_data"]["col_name_index"]] if + config["projecturl_data"]["col_name_index"] > 0 else None}) + issue_temp.update( + {"description": bundle[ + config["projectdescription_data"]["col_name_index"]] if + config["projectdescription_data"]["col_name_index"] > 0 else None}) + issue_temp.update( + {"lead": bundle[config["projectlead_data"]["col_name_index"]] if + config["projectlead_data"]["col_name_index"] > 0 else None}) + if workflows: + issue_temp.update({ + "workflowSchemeName": workflows[ + _project_keys_] if _project_keys_ in + workflows else "Sample project workflow" + }) + issue_temp.update({"externalName": project_name}) + json_project_template["projects"].append( + issue_temp) + + # Dynamically get a sub-item into custom field values + # Below condition for arranging field values + if not obj_name.get("column_name").startswith("custom"): + if obj_name.get("column_name").lower().startswith("comment"): + issue_comment = obj_value.split(";") + if len(issue_comment) > 1: + _data = { + "created": parse_dates(issue_comment[0]), + "author": issue_comment[1], + "body": issue_comment[2], + } + if is_sd_internal is True: + _data.update( + { + "properties": [ + {"key": "sd.public.comment", "value": + {"internal": issue_comment[3]}}] + } + ) + json_comment_template["comments"].append(_data) + + elif obj_name.get("column_name").lower().startswith("attachment"): + attacher = 
obj_value.split(";") + if len(attacher) > 1: + _data = { + "created": parse_dates(attacher[0]), + "attacher": attacher[1], + "name": attacher[2], + "uri": attacher[3], + } + json_attachment_template["attachments"].append(_data) + elif obj_name.get("column_name").startswith("worklog"): + worklog = obj_value.split(";") + if len(worklog) > 1: + _data = { + "startDate": parse_dates(worklog[1]), + "author": worklog[2], + "timeSpent": parse_duration(worklog[3]), + } + json_worklog_template["worklogs"].append(_data) + + elif obj_name.get("column_name").startswith("labels"): + if obj_value == "" or obj_value is None: + pass + else: + json_labels_template["labels"].append(obj_value) + + elif obj_name.get("column_name").startswith("Inward issue link"): + if obj_value == "" or obj_value is None: + pass + else: + json_linked_issues_template["links"].append( + { + "name": obj_name.get("linked_name"), + "sourceId": obj_value, + "destinationId": bundle[config["issuekey_data"]["col_name_index"]] + } + ) + elif obj_name.get("column_name").startswith("Outward issue link"): + if obj_value == "" or obj_value is None: + pass + else: + json_linked_issues_template["links"].append( + { + "name": obj_name.get("linked_name"), + "destinationId": obj_value, + "sourceId": bundle[config["issuekey_data"]["col_name_index"]] + } + ) + elif obj_name.get("column_name").startswith("Watchers Id"): + if obj_value == "" or obj_value is None: + pass + else: + json_watchers_template["watchers"].append(obj_value) + elif obj_name.get("column_name").startswith("components"): + if obj_value == "" or obj_value is None: + pass + else: + json_component_template["components"].append(obj_value) + elif obj_name.get("column_name").startswith("fixVersions"): + if obj_value == "" or obj_value is None: + pass + else: + json_fixversion_template["fixedVersions"].append(obj_value) + elif obj_name.get("column_name").startswith("affectedVersions"): + if obj_value == "" or obj_value is None: + pass + else: + json_affectversion_template["affectedVersions"].append(obj_value) + elif obj_name.get("column_name").startswith("Issue id"): + issue_id = obj_value + issue_data.update({"externalId": issue_id}) + elif obj_name.get("column_name").startswith("Sprint"): + if obj_value == "" or obj_value is None: + pass + else: + sprint_extract(obj_value) + + else: + if obj_value is None or obj_value == "": + pass + else: + data.update({ + obj_name.get("column_name"): obj_value + }) + issue_data.update(data) + else: + _data = produce_custom_type(obj_name, obj_value) + if _data is not None: + if "value" in _data: + if _data["value"] is None or _data["value"] == "": + pass + else: + json_customfield_template["customFieldValues"].append( + _data + ) + json_customfield_sub_template["value"].clear() + + # Only include sprint data if not empty + if sprint_issue_data: + json_customfield_template["customFieldValues"].append( + { + "fieldName": "Sprint", + "fieldType": field.field_type.get("sprint"), + "value": sprint_issue_data + } + ) + + # remove custom field id generated and issue links + value_to_delete = [] + for _issue_key, _issue_value in issue_data.items(): + if _issue_key.startswith("Custom field"): + value_to_delete.append(_issue_key) + if _issue_key.startswith("Inward issue link"): + value_to_delete.append(_issue_key) + if _issue_key.startswith("Outward issue link"): + value_to_delete.append(_issue_key) + + for _vals_ in value_to_delete: + del issue_data[_vals_] + + # Add each field value to the issue data object + issue_data.update(json_customfield_template) + 
issue_data.update(json_comment_template) + issue_data.update(json_attachment_template) + issue_data.update(json_watchers_template) + issue_data.update(json_labels_template) + issue_data.update(json_worklog_template) + issue_data.update(json_component_template) + issue_data.update(json_fixversion_template) + issue_data.update(json_affectversion_template) + + # perform field copy and rewrites for system known fields + issue_data["timeSpent"] = parse_duration(issue_data.get("timespent")) + issue_data["originalEstimate"] = parse_duration( + issue_data.get("timeoriginalestimate") + ) + issue_data["estimate"] = parse_duration(issue_data.get("timeestimate")) + issue_data["issueType"] = issue_data["issuetype"] + issue_data["resolutionDate"] = parse_dates(issue_data.get("resolutiondate")) + if LOGIN.api is True: + issue_data["assignee"] = issue_data.get("Assignee Id") + issue_data["reporter"] = issue_data.get("Reporter Id") + issue_data["creator"] = issue_data.get("Creator Id") + if "created" in issue_data: + issue_data["created"] = parse_dates(issue_data.get("created")) + if "Created" in issue_data: + issue_data["created"] = parse_dates(issue_data.get("Created")) + issue_data["updated"] = parse_dates(issue_data.get("updated")) + issue_data["duedate"] = parse_dates(issue_data.get("duedate")) + + # project configuration data + issue_data["projectDescription"] = issue_data.get( + "Project description") + issue_data["projectKey"] = issue_data.get("Project key") + issue_data["projectLead"] = issue_data.get("Project lead id") + issue_data["projectName"] = issue_data.get("Project name") + issue_data["projectType"] = issue_data.get("Project type") + issue_data["projectUrl"] = issue_data.get("Project url") + issue_data["id"] = issue_data.get("externalId") + issue_data["parent"] = issue_data.get("Parent") + issue_data["key"] = issue_data.get("Issue key") + issue_data["epicLinkSummary"] = issue_data.get("Epic Link Summary") + issue_data["statusCategory"] = issue_data.get("Status Category") + issue_data["parentSummary"] = issue_data.get("Parent summary") + # perform deletion of unused fields + items_to_delete = ["Project lead id", "Project lead", + "Project description", + "Project key", "Project name", + "Project type", "Status Category", + "Project url", "Issue id", "watches", + "workratio", "versions", + "Created", "Creator", "timeestimate", + "issuetype", "resolutiondate", + "timeoriginalestimate", "timespent", + "Parent", "Parent id", "Epic Link Summary", + "Parent summary", "Issue key"] + for del_item in items_to_delete: + if del_item in issue_data: + del issue_data[del_item] + + if issue_data["timeSpent"] == "": + del issue_data["timeSpent"] + if issue_data["originalEstimate"] == "": + del issue_data["originalEstimate"] + if issue_data["duedate"] == "" or \ + issue_data["duedate"] is None: + del issue_data["duedate"] + if issue_data["estimate"] == "" or \ + issue_data["estimate"] is None: + del issue_data["estimate"] + if issue_data["resolutionDate"] == "" or \ + issue_data["resolutionDate"] is None: + del issue_data["resolutionDate"] + if LOGIN.api is True: + user_attr = ["Assignee Id", "Reporter Id", + "Creator Id"] + for attr in user_attr: + if attr in issue_data: + del issue_data[attr] + if not issue_data["worklogs"]: + del issue_data["worklogs"] + + # appending all issues data to each issue list per + # project key + project_index = project_config[ + bundle[config["save_point"]["col_name_index"]]] + json_project_template["projects"][ + project_index]["issues"].append(issue_data) + + 
start_process() + + my_index, do_once = -1, False + # Begin the JSON conversion process + print("JSON conversion started.") + try: + for name_of_fields in read_csv_file: + field_builder( + name_of_fields, + config["headers"] + ) + except (IndexError, KeyError, TypeError, + AttributeError, ValueError, JiraOneErrors) as err: + os.remove(path_builder(folder, temp_file)) + add_log( + f"{err} on line {err.__traceback__.tb_lineno}" + f" with {err.__traceback__} " + f"{sys.__excepthook__(Exception, err, err.__traceback__)}", + "error") + exit(f"An error has occurred: {err}") + + config["json_build"].update(json_project_template) + + def parse_history_data(history_key: str) -> None: + """ + Parse some history payload and process + some object with list of items about the + history + + :param history_key: A Jira issue key + :return: None + """ + query = f"key = {history_key}" + history_folder = f"{folder}/history" + history_file = f"history_{history_key}.csv" + PROJECT.change_log(folder=history_folder, allow_cp=False, + file=history_file, + jql=query, show_output=False) + read_history = file_reader(history_folder, + history_file, + skip=True) + history_data = [] + for _history_ in read_history: + name_mapper = { + "issueKey": _history_[0], + "summary": _history_[1], + "author": _history_[2], + "created": _history_[3], + "fieldType": _history_[4], + "field": _history_[5], + "fieldId": _history_[6], + "from_": _history_[7], + "fromString": _history_[8], + "to_": _history_[9], + "toString": _history_[10], + "fromAccountId": _history_[11], + "toAccountId": _history_[12] + } + mapped = DotNotation(name_mapper) + _history_data_ = { + "author": name_to_user_id( + mapped.author).get("account_id"), + "created": mapped.created, + "items": [ + { + "fieldType": mapped.fieldType, + "field": mapped.field, + "from": mapped.from_ or None, + "fromString": mapped.fromString or None, + "to": mapped.to_ or None, + "toString": mapped.toString or None + } + ] + } + history_data.append(_history_data_) + + json_history_template["history"].append( + { + "key": history_key, + "value": history_data + } + ) + + os.remove(path_builder(history_folder, history_file)) + + # adjust sub-task link + def run_multi_check(parent_key: str) -> str: + """ + Run a search on the json build object until + you can find a parent issue, if not return + empty string + + :param parent_key: An issue number + :return: str + """ + for some_issue in config["json_build"]["projects"]: + another_issue = some_issue["issues"] + for now_issue in another_issue: + now_id = now_issue.get("id") + now_issue_key = now_issue.get("key") + if parent_key == now_id: + return now_issue_key + return "" + + for sub_task_check in config["json_build"]["projects"]: + issues = sub_task_check["issues"] + for issue in issues: + if issue.get("issueType") in sub_tasks: + get_parent = issue.get("parent") + get_sub_task_key = issue.get("key") + linked_parent = run_multi_check(get_parent) + json_linked_issues_template["links"].append( + { + "name": "jira_subtask_link", + "destinationId": get_sub_task_key, + "sourceId": linked_parent + } + ) + + if json_properties: + if "links" in [x.lower() for x in json_properties]: + print("Adding linked issues to the export") + + config["json_build"].update( + json_linked_issues_template + ) + + if "users" in [x.lower() for x in json_properties]: + print("Updating users and group to the export") + for names_of_users in config["json_userlist"]: + usernames = { + "name": names_of_users.get("account_id"), + "fullname": 
names_of_users.get("display_name"), + "active": names_of_users.get("active"), + "groups": names_of_users.get("groups"), + "email": names_of_users.get("email") + } + json_user_template["users"].append(usernames) + + _user_data = { + "users": json_user_template["users"] + } + config["json_build"].update( + _user_data + ) + + if "history" in [x.lower() for x in json_properties]: + print("Extracting change history from issues") + + for search_history in config["json_build"]["projects"]: + issue_history = search_history["issues"] + for history in issue_history: + key = history.get("key") + process_executor( + parse_history_data, + data=key, + workers=workers, + timeout=timeout + ) + + print("Appending historic data into JSON structure") + # If there are any running threads, let's wait for + # their shutdown + sleep(flush) + + for search_history in config["json_build"]["projects"]: + issue_history = search_history["issues"] + for history in issue_history: + key = history.get("key") + for sub_history in json_history_template["history"]: + sub_key = sub_history.get("key") + sub_value = sub_history.get("value") + if key == sub_key: + history["history"] = sub_value + + print("Clearing temporary configuration data") + project_settings.clear() + project_config.clear() + config["save_point"].clear() + + if ext.lower() == "json": + json_field_builder() + extend_file_type() + for file in config["exports"]: + path = path_builder(folder, file) + os.remove(path) + + if exclude_fields: + field_exclude() + elif include_fields: + field_include() + + extend_format(extension) + print( "Export Completed.File located at {}".format( path_builder(folder, final_file) @@ -2904,12 +5012,12 @@ class Users: user_list = deque() def get_all_users( - self, - pull: str = "both", - user_type: str = "atlassian", - file: str = None, - folder: str = Any, - **kwargs, + self, + pull: str = "both", + user_type: str = "atlassian", + file: str = None, + folder: str = Any, + **kwargs, ) -> None: """Generates a list of users. @@ -2950,7 +5058,7 @@ def get_all_users( print("Current Record - At Row", count_start_at) add_log(f"Current Record - At Row {count_start_at}", "info") - if str(results) == "[]": + if not results: break else: sys.stderr.write( @@ -2965,7 +5073,7 @@ def get_all_users( self.report(category=folder, filename=file, **kwargs) def report( - self, category: str = Any, filename: str = "users_report.csv", **kwargs + self, category: str = Any, filename: str = "users_report.csv", **kwargs ) -> None: """Creates a user report file in CSV format. :return: None @@ -2977,7 +5085,7 @@ def report( add_log(f"Generating report file on {filename}", "info") def user_activity( - self, status: str = Any, account_type: str = Any, results: List = Any + self, status: str = Any, account_type: str = Any, results: List = Any ) -> None: """Determines users activity. @@ -3006,11 +5114,11 @@ def stack(c: Any, f: List, s: Any) -> None: stack(self, list_user, each_user) def get_all_users_group( - self, - group_folder: str = "Groups", - group_file_name: str = "group_file.csv", - user_extraction_file: str = "group_extraction.csv", - **kwargs, + self, + group_folder: str = "Groups", + group_file_name: str = "group_file.csv", + user_extraction_file: str = "group_extraction.csv", + **kwargs, ) -> None: """Get all users and the groups associated to them on the Instance. 
:return: None @@ -3040,7 +5148,7 @@ def get_all_users_group( add_log("Get Users group Completed", "info") def search_user( - self, find_user: Union[str, list] = None, folder: str = "Users", **kwargs + self, find_user: Union[str, list] = None, folder: str = "Users", **kwargs ) -> Union[list, int]: """Get a list of all cloud users and search for them by using the displayName. @@ -3050,12 +5158,17 @@ def search_user( :param kwargs: Additional arguments - *options* - skip (bool) - allows you to skip the header of ``file_reader`` - delimiter (str) - allows a delimiter to the ``file_reader`` function - pull (str) - determines which user is available e.g. "active", "inactive" - user_type (str) - searches for user type e.g "atlassian", "customer" - file (str) - Name of the file + **options** + + * skip (bool) - allows you to skip the header of ``file_reader`` + + * delimiter (str) - allows a delimiter to the ``file_reader`` function + + * pull (str) - determines which user is available e.g. "active", "inactive" + + * user_type (str) - searches for user type e.g "atlassian", "customer" + + * file (str) - Name of the file """ pull = kwargs["pull"] if "pull" in kwargs else "both" @@ -3156,14 +5269,14 @@ def path_builder(path: str = "Report", file_name: str = Any, **kwargs) -> str: def file_writer( - folder: str = WORK_PATH, - file_name: str = None, - data: Iterable = object, - mark: str = "single", - mode: str = "a+", - content: str = None, - **kwargs, -) -> Any: + folder: str = WORK_PATH, + file_name: str = None, + data: Iterable = object, + mark: str = "single", + mode: str = "a+", + content: Union[str, bytes] = None, + **kwargs, +) -> None: """Reads and writes to a file, single or multiple rows or write as byte files. :param folder: A path to the name of the folder @@ -3182,11 +5295,13 @@ def file_writer( :param kwargs: Additional parameters - *options* + **options** + + * delimiter: defaults to comma - datatype (strings) - delimiter: defaults to comma - datatype (strings) + * encoding: defaults to utf-8 - datatype (strings) - encoding: defaults to utf-8 - datatype (strings) + * errors: defaults to replace - datatype (strings) .. versionchanged:: 0.7.3 @@ -3226,12 +5341,12 @@ def file_writer( def file_reader( - folder: str = WORK_PATH, - file_name: str = None, - mode: str = "r", - skip: bool = False, - content: bool = False, - **kwargs, + folder: str = WORK_PATH, + file_name: str = None, + mode: str = "r", + skip: bool = False, + content: bool = False, + **kwargs, ) -> Union[List[List[str]], str]: """Reads a CSV file and returns a list comprehension of the data or reads a byte into strings. @@ -3247,11 +5362,13 @@ def file_reader( :param kwargs: Additional parameters - *options* + **options** - encoding - standard encoding strings. e.g “utf-8”. + * encoding - standard encoding strings. e.g “utf-8”. - delimiter: defaults to comma. + * delimiter: defaults to comma. + + * errors: defaults to replace .. versionchanged:: 0.7.3 @@ -3288,7 +5405,7 @@ def file_reader( def replacement_placeholder( - string: str = None, data: List = None, iterable: List = None, row: int = 2 + string: str = None, data: List = None, iterable: List = None, row: int = 2 ) -> Any: """Return multiple string replacement. 
@@ -3326,14 +5443,14 @@ def replacement_placeholder( def delete_attachments( - file: Optional[str] = None, - search: Union[str, Dict, List, int] = None, - delete: bool = True, - extension: Union[str, List] = None, - by_user: Optional[List] = None, - by_size: Optional[str] = None, - by_date: Optional[str] = None, - **kwargs: Union[str, bool], + file: Optional[str] = None, + search: Union[str, Dict, List, int] = None, + delete: bool = True, + extension: Union[str, List] = None, + by_user: Optional[List] = None, + by_size: Optional[str] = None, + by_date: Optional[str] = None, + **kwargs: Union[str, bool], ) -> None: """ A function that helps to delete attachments on Jira issues. @@ -3388,7 +5505,7 @@ def delete_attachments( :param kwargs: Additional arguments - *Available options* + **Available options** * allow_cp: Allows the ability to trigger and save a checkpoint. @@ -3685,7 +5802,7 @@ def get_attachments(items: Dict) -> None: """ nonlocal attach_load, count, depth infinity_point = data_brick["point"] - issues = items["issues"][data_brick["point"] :] + issues = items["issues"][data_brick["point"]:] attach_load = data_brick["data_block"] if back_up is True else attach_load count = set_up["iter"] if back_up is True and depth == 1 else data_brick["iter"] @@ -3752,7 +5869,7 @@ def inf_block(atl: bool = False): set_up = {} def data_wipe( - del_, counts_, usr: bool = False, fl: bool = False, _items_=None + del_, counts_, usr: bool = False, fl: bool = False, _items_=None ) -> None: """ Trigger the delete mode. From 55f4d551a4c3126282c6c81f78acb1624d473238 Mon Sep 17 00:00:00 2001 From: Prince Date: Sat, 27 May 2023 12:33:57 +0200 Subject: [PATCH 15/17] Update reporting.py * Added more arguments to the issue_export() function --- jiraone/reporting.py | 44 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/jiraone/reporting.py b/jiraone/reporting.py index 218eed38..1f6c34aa 100644 --- a/jiraone/reporting.py +++ b/jiraone/reporting.py @@ -2056,7 +2056,8 @@ def export_issues( * allow_media: Datatype (bool) Ability to add a user credential to each attachment uri of the "Attachment" column - of a CSV export + of a CSV export. This helps to easily append credentials to + all rows of the CSV export with your current credentials. * sub_tasks: Datatype (list) Ability to identify all the sub-tasks issues present in a JSON export. 
Useful when you
@@ -3726,10 +3727,10 @@ def field_exclude() -> None:
                 first_run = True
                 exclude_list.append(exclude_data)
 
-            print("Excluding declared field columns into the CSV file")
+            print("Reconstructing file headers")
             file_writer(folder, temp_file, data=[file_headers],
                         mark="many", mode="w+")
-            print("Reconstructing file headers")
+            print("Excluding declared field columns into the CSV file")
             file_writer(folder, temp_file, data=exclude_list,
                         mark="many")
 
@@ -3762,10 +3763,10 @@ def field_include() -> None:
                 first_run = True
                 include_list.append(include_data)
 
-            print("Including declared field columns into the CSV file")
+            print("Reconstructing file headers")
             file_writer(folder, temp_file, data=[file_headers],
                         mark="many", mode="w+")
-            print("Reconstructing file headers")
+            print("Including declared field columns into the CSV file")
             file_writer(folder, temp_file, data=include_list,
                         mark="many")
 
@@ -4933,6 +4934,39 @@ def run_multi_check(parent_key: str) -> str:
                 path = path_builder(folder, file)
                 os.remove(path)
 
+        if allow_media is True:
+            data_frame(files_=temp_file)
+            attach_read = file_reader(folder,
+                                      temp_file,
+                                      skip=True)
+
+            _change_flag_ = False
+            for _attach_column_ in attach_read:
+                for allow_row, attach_header in zip(_attach_column_,
+                                                    config["headers"]):
+                    if attach_header.get("column_name") == "Attachment":
+                        if allow_row is None or allow_row == "":
+                            pass
+                        else:
+                            _change_flag_ = True
+                            _get_items_ = allow_row.split(";")
+                            get_attachment = _get_items_.pop(-1)
+                            get_parse_value = parse_media(get_attachment)
+                            _get_items_.append(get_parse_value)
+                            amend_attachment = ";".join(_get_items_)
+                            _attach_column_[
+                                attach_header.get("column_index")
+                            ] = amend_attachment
+
+            _file_headers_ = [x.get("column_name") for x in config["headers"]]
+            print("Reconstructing file headers")
+            file_writer(folder, temp_file, data=[_file_headers_],
+                        mark="many", mode="w+")
+            print("Applying updated data into the CSV file") if _change_flag_ \
+                is True else print("No change for attachment done to CSV file")
+            file_writer(folder, temp_file, data=attach_read,
+                        mark="many")
+
         if exclude_fields:
             field_exclude()
         elif include_fields:

From 3b40d17b2f66f482b06187c714050e46341274bd Mon Sep 17 00:00:00 2001
From: Prince
Date: Sat, 27 May 2023 12:37:27 +0200
Subject: [PATCH 16/17] Update CHANGES.md

* Update to changelog with newly added functionalities
---
 CHANGES.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/CHANGES.md b/CHANGES.md
index 2faf8d50..f8d1dcb7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,5 +1,16 @@
 # Jira one change log
 
+**Release 0.7.6** - 2023-05-27
+### Minor update #110
+* Added multiple arguments to `issue_export` method
+  * It is now possible to perform an export in JSON format
+  * Added field exclusion and inclusion in CSV format export
+* Added new methods `get_project_versions` and `issue_link_types` to access.py module
+* Added a `process_executor` function, regular expression constants and a `DateFormat` class to
+the utils module of jiraone.
+* Added examples to the documentation
+
+
 **Release 0.7.5** - 2023-03-16
 ### Update #109
 * Added a new condition to use encoding argument in `file_writer` in text mode

From 9f14c1e1f254af0c56cfe8dd0cca7642bda8b0b Mon Sep 17 00:00:00 2001
From: Prince
Date: Sat, 27 May 2023 12:38:06 +0200
Subject: [PATCH 17/17] Update SECURITY.md

* Amended security version
---
 SECURITY.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/SECURITY.md b/SECURITY.md
index 5bb07bc9..29c29158 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -5,12 +5,13 @@ Below shows the list of supported version for the jiraone library
 
 | Version | Supported |
-| ------- | ------------------ |
+|---------|--------------------|
+| 0.7.6 | :white_check_mark: |
 | 0.7.5 | :white_check_mark: |
 | 0.7.4 | :white_check_mark: |
 | 0.7.3 | :white_check_mark: |
-| 0.7.2 | :white_check_mark: |
-| 0.7.1 | :white_check_mark: |
+| 0.7.2 | :x: |
+| 0.7.1 | :x: |
 | 0.7.0 | :x: |
 | 0.6.5 | :x: |
 | 0.6.3 | :x: |
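
A minimal usage sketch of the 0.7.6 export options introduced in the changelog above (JSON export, field inclusion and exclusion, allow_media and json_properties). This is illustrative only: the site URL, credentials, JQL and folder name are placeholders, and the keyword names are taken from the docstrings and changelog in this patch series, so confirm the exact signature of PROJECT.export_issues against the jiraone documentation before use.

    # Illustrative sketch only: keyword names follow the docstrings and
    # changelog in this patch series; verify against the jiraone 0.7.6 docs.
    from jiraone import LOGIN, PROJECT

    # Placeholder credentials and site URL
    LOGIN(user="email@example.com", password="api-token",
          url="https://yoursite.atlassian.net")

    # CSV export: append the current credentials to each attachment URI
    # (allow_media) and drop two columns from the output file.
    PROJECT.export_issues(
        jql="project = ABC ORDER BY created DESC",
        folder="EXPORT",
        allow_media=True,
        exclude_fields=["Comment", "Watchers"],
    )

    # JSON export, also pulling users, linked issues and change history
    # into the generated structure.
    PROJECT.export_issues(
        jql="project = ABC ORDER BY created DESC",
        folder="EXPORT",
        extension="json",
        json_properties=["users", "links", "history"],
    )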