Add Deadlines to Output #114

Open · wants to merge 18 commits into main from feature/add-deadlines
Merge branch 'main' into feature/add-deadlines
burgess01 authored Mar 9, 2023
commit 971c91b94e7ee9dd259d5cc513d26bde801dc9a9
2 changes: 1 addition & 1 deletion gatorgrade/main.py
@@ -55,7 +55,7 @@ def gatorgrade(
    # there are valid checks and thus the
    # tool should run them with run_checks
    if len(checks) > 0:
-        checks_status = run_checks(checks, deadline)
+        checks_status = run_checks(checks, report, deadline)
    # no checks were created and this means
    # that, most likely, the file was not
    # valid and thus the tool cannot run checks
161 changes: 161 additions & 0 deletions gatorgrade/output/output.py
@@ -81,6 +81,117 @@ def _run_gg_check(check: GatorGraderCheck) -> CheckResult:
    )


def create_report_json(
    passed_count,
    checkResults: List[CheckResult],
    percent_passed,
) -> dict:
    """Take the check results and assemble them into a JSON-ready dictionary.

    Args:
        passed_count: the number of checks that passed
        checkResults: the list of check results that will be put in the JSON
        percent_passed: the percentage of checks that passed
    """
    # create the list of keys for the dictionary that
    # will be converted into JSON
    overall_key_list = ["amount_correct", "percentage_score", "checks"]

    checks_list = []
    overall_dict = {}

    # grab the information from each check result and add it to the checks list
    for check_result in checkResults:
        results_json = check_result.json_info
        results_json["status"] = check_result.passed
        # only failing checks carry a diagnostic
        if not check_result.passed:
            results_json["diagnostic"] = check_result.diagnostic
        checks_list.append(results_json)

    # create the dictionary for all of the check information
    overall_dict = dict(
        zip(overall_key_list, [passed_count, percent_passed, checks_list])
    )
    return overall_dict
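
Reviewer note: a sketch of the dictionary this helper produces, assuming one passing and one failing check. The check descriptions and diagnostic below are hypothetical; the exact keys inside each check entry come from CheckResult.json_info.

# hypothetical result of create_report_json(1, results, 50):
{
    "amount_correct": 1,
    "percentage_score": 50,
    "checks": [
        {"description": "Complete all TODOs", "status": True},
        {
            "description": "Add a print statement",
            "status": False,
            "diagnostic": "statement not found",  # only present on failing checks
        },
    ],
}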


def create_markdown_report_file(json: dict) -> str:
    """Create a markdown report from the generated JSON, for use in the GitHub Actions summary, among other places.

    Args:
        json: a dictionary containing the JSON that should be converted to markdown
    """
    markdown_contents = ""
    passing_checks = []
    failing_checks = []

    num_checks = len(json.get("checks"))

    # write the project name, amount correct, and percentage score to the report
    markdown_contents += f"# Gatorgrade Insights\n\n**Project Name:** {Path.cwd().name}\n**Amount Correct:** {(json.get('amount_correct'))}/{num_checks} ({(json.get('percentage_score'))}%)\n"

    # split the checks into passing and failing
    for check in json.get("checks"):
        if check["status"]:
            passing_checks.append(check)
        else:
            failing_checks.append(check)

    # give short information about the passing checks
    markdown_contents += "\n## Passing Checks\n"
    for check in passing_checks:
        if "description" in check:
            markdown_contents += f"\n- [x] {check['description']}"
        else:
            markdown_contents += f"\n- [x] {check['check']}"

    # give extended information about the failing checks
    markdown_contents += "\n\n## Failing Checks\n"
    for check in failing_checks:
        if "description" in check:
            markdown_contents += f"\n- [ ] {check['description']}"
        else:
            markdown_contents += f"\n- [ ] {check['check']}"

        # report every recognized option that the failing check configured
        if "options" in check:
            for option in ("command", "fragment", "tag", "count", "directory", "file"):
                if option in check["options"]:
                    val = check["options"][option]
                    markdown_contents += f"\n\t- **{option}:** {val}"
        elif "command" in check:
            val = check["command"]
            markdown_contents += f"\n\t- **command:** {val}"
        if "diagnostic" in check:
            markdown_contents += f"\n\t- **diagnostic:** {check['diagnostic']}"
        markdown_contents += "\n"

    return markdown_contents
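
Reviewer note: roughly the markdown this helper would emit for the hypothetical dictionary sketched above, assuming the current working directory is named gatorgrade (the project name comes from Path.cwd().name):

# Gatorgrade Insights

**Project Name:** gatorgrade
**Amount Correct:** 1/2 (50%)

## Passing Checks

- [x] Complete all TODOs

## Failing Checks

- [ ] Add a print statement
	- **diagnostic:** statement not found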


def configure_report(report_params: Tuple[str, str, str], report_output_data: dict):
    """Put together the contents of the report depending on the inputs of the user.

    Args:
        report_params: the details of what the user wants the report to look like
            report_params[0]: file or env
            report_params[1]: json or md
            report_params[2]: the name of the file or environment variable
        report_output_data: the json dictionary that will be used or converted to md
    """
    # if the user wants markdown, convert the json into md
    if report_params[1] == "md":
        report_output_data = create_markdown_report_file(report_output_data)

    # if the user wants the data stored in a file
    if report_params[0] == "file":
        # try to store it in that file
        try:
            # the second argument has to be json or md
            if report_params[1] != "json" and report_params[1] != "md":
                rich.print(
                    "\n[red]The second argument of report has to be 'md' or 'json'"
                )
            else:
                with open(report_params[2], "w", encoding="utf-8") as file:
                    if report_params[1] == "json":
                        file.write(json.dumps(report_output_data))
                    else:
                        file.write(str(report_output_data))
        except OSError:
            rich.print(
                "\n[red]Can't open or write to the target file; check that you provided a valid path"
            )
    elif report_params[0] == "env":
        if report_params[2] == "GITHUB_STEP_SUMMARY":
            env_file = os.getenv("GITHUB_STEP_SUMMARY")
            with open(env_file, "a", encoding="utf-8") as summary_file:
                summary_file.write(str(report_output_data))
        else:
            os.environ[report_params[2]] = str(report_output_data)
    else:
        rich.print("\n[red]The first argument of report has to be 'env' or 'file'")


def calculate_deadline_time_dif(older_time: datetime, latest_time: datetime):
    """Input two times and return the difference between the two in days, hours, minutes, and seconds.

@@ -97,6 +208,51 @@ def calculate_deadline_time_dif(older_time: datetime, latest_time: datetime):

-def run_checks(checks: List[Union[ShellCheck, GatorGraderCheck]], deadline) -> bool:
+def run_checks(
+    checks: List[Union[ShellCheck, GatorGraderCheck]],
+    report: Tuple[str, str, str],
+    deadline,
+) -> bool:
    """Run shell and GatorGrader checks and display whether each has passed or failed.

    Also, print a list of all failed checks with their diagnostics and a summary message that
@@ -157,6 +313,11 @@ def run_checks(checks: List[Union[ShellCheck, GatorGraderCheck]], deadline) -> bool:
f"\n-~- Your assignment is due in {days * -1} days, {hours} hours, {minutes} minutes, and {seconds} seconds. -~-"
)

# if the report is wanted, create output in line with their specifications
if all(report):
report_output_data = create_report_json(passed_count, results, percent)
configure_report(report, report_output_data)

# compute summary results and display them in the console
summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!"
summary_color = "green" if passed_count == len(results) else "bright white"
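
Reviewer note: all(report) means a report is only generated when every element of the tuple is truthy, which is why the updated tests below pass report = (None, None, None) to keep reporting disabled.

all((None, None, None))            # False -> reporting skipped
all(("file", "md", "report.md"))   # True  -> report is generated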
8 changes: 4 additions & 4 deletions tests/output/test_output.py
@@ -31,7 +31,7 @@ def test_run_checks_gg_check_should_show_passed(capsys):
    )
    report = (None, None, None)
    # When run_checks is called
-    output.run_checks([check], None)
+    output.run_checks([check], report, None)
    # Then the output shows that the check has passed
    out, _ = capsys.readouterr()
    assert "✓ Check TODOs" in out
@@ -109,7 +109,7 @@ def test_run_checks_invalid_gg_args_prints_exception(capsys):
    )
    report = (None, None, None)
    # When run_checks is called
-    output.run_checks([check], None)
+    output.run_checks([check], report, None)
    # Then the output contains a declaration
    # about the use of an Invalid GatorGrader check
    out, _ = capsys.readouterr()
@@ -159,7 +159,7 @@ def test_run_checks_some_failed_prints_correct_summary(capsys):
    ]
    report = (None, None, None)
    # When run_checks is called
-    output.run_checks(checks, None)
+    output.run_checks(checks, report, None)
    # Then the output shows the correct fraction and percentage of passed checks
    out, _ = capsys.readouterr()
    assert "Passed 2/3 (67%) of checks" in out
@@ -206,7 +206,7 @@ def test_run_checks_all_passed_prints_correct_summary(capsys):
    ]
    report = (None, None, None)
    # When run_checks is called
-    output.run_checks(checks, None)
+    output.run_checks(checks, report, None)
    # Then the output shows the correct fraction and percentage of passed checks
    out, _ = capsys.readouterr()
    assert "Passed 3/3 (100%) of checks" in out