commandParser.py
from utils import logger
import inquirer
# from inquirer import prompt
from config import datasets, models, rerankers, fusions, evaluationMetrics, fairnessModules

# Choice lists, populated once by initChoices() before prompting
modelChoices = []
rerankingChoices = []
fusionChoices = []
fairnessChoices = []
datasetChoices = []
evaluatorChoices = []


def initChoices():
    """
    Preparing choices for the questions

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Preparing model items
    for model in models:
        modelChoices.append(model)
    # Preparing reranking items
    for reranker in rerankers:
        rerankingChoices.append(reranker)
    # Preparing fairness module items
    for mod in fairnessModules:
        fairnessChoices.append(mod)
    # Preparing dataset items
    for dataset in datasets:
        datasetChoices.append(dataset)
    # Preparing fusion items
    for fusion in fusions:
        fusionChoices.append(fusion)
    # Preparing evaluation metrics items
    for evaluator in evaluationMetrics:
        evaluatorChoices.append({'name': evaluator})
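

# Note: models, rerankers, fairnessModules, datasets, fusions and
# evaluationMetrics come from config and are iterated directly above, so each
# is assumed to be a dict (or other iterable) whose iteration yields the
# display names; getUserChoices() additionally relies on models and datasets
# being dicts, since it looks up models[...] and datasets[...] by name.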


def interactiveCommandForm():
    """
    Generating the interactive form for the user to select the parameters

    Parameters
    ----------
    None

    Returns
    -------
    userInputs: dict
        Dictionary containing the user inputs
    """
    # Initiate choices
    initChoices()
    # Apply choices to the questions
    questions = [
        inquirer.List('Model',
                      message="Choose the model you need:",
                      choices=modelChoices
                      ),
        inquirer.List('Reranking',
                      message="Choose the reranking method you need:",
                      choices=rerankingChoices
                      ),
        inquirer.List('Fairness',
                      message="Choose a fairness consideration to integrate:",
                      choices=fairnessChoices
                      ),
        inquirer.List('Dataset',
                      message="Choose the dataset you need:",
                      choices=datasetChoices
                      ),
        inquirer.List('Fusion',
                      message="Choose the fusion you need:",
                      choices=fusionChoices
                      ),
        inquirer.Checkbox('Evaluation',
                          message="Choose at least one evaluation metric:",
                          choices=evaluatorChoices
                          ),
        inquirer.Confirm('Confirmation',
                         message="Do you confirm your selected choices?",
                         default=True
                         ),
    ]
    # Prompting the user and collecting the selected items
    userInputs = inquirer.prompt(questions)
    return userInputs
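

# Illustrative shape of the dictionary returned by interactiveCommandForm()
# (the keys come from the questions above; the values shown are placeholders,
# not actual entries from config):
#   {
#       'Model': '<selected model>',
#       'Reranking': '<selected reranker>',
#       'Fairness': '<selected fairness module>',
#       'Dataset': '<selected dataset>',
#       'Fusion': '<selected fusion>',
#       'Evaluation': [...],        # one entry per selected metric
#       'Confirmation': True,
#   }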


def getUserChoices():
    """
    Getting the user inputs and validating them

    Parameters
    ----------
    None

    Returns
    -------
    userInputs: dict
        Dictionary containing the user inputs
    """
    userInputs = interactiveCommandForm()
    # inquirer.prompt() may return None when the prompt is cancelled (e.g. Ctrl+C)
    if userInputs is None:
        return
    confirmation = userInputs['Confirmation']
    if confirmation:
        print('Validating your choices ...')
        selectedModelScopes = models[userInputs['Model']]
        selectedDatasetScopes = datasets[userInputs['Dataset']]
        ignoredContexts = []
        # Checking if dataset covers all scopes of models
        isCovered = all(
            item in selectedDatasetScopes for item in selectedModelScopes)
        if not isCovered:
            difference = [
                item for item in selectedModelScopes if item not in selectedDatasetScopes]
            printMessage = f'Ignoring {difference} scope(s) of {userInputs["Model"]}, as not covered in {userInputs["Dataset"]}!'
            logger(printMessage, 'warn')
            ignoredContexts = difference
        # Checking if at least one evaluation metric is selected
        if len(userInputs['Evaluation']) == 0:
            printMessage = 'No evaluation metric has been selected!'
            logger(printMessage, 'error')
            return
        logger(f'User inputs: {userInputs}', 'info', True)
        userInputs['Ignored'] = ignoredContexts
        return userInputs
    # Note: if the user does not confirm, or no evaluation metric is selected,
    # the function returns None to the caller.
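

# Minimal usage sketch (assumption: this module is normally imported by the
# project's own entry point). It shows how a caller might drive the
# interactive form; the None handling reflects getUserChoices() returning
# nothing when the selections are not confirmed or no metric is chosen.
if __name__ == "__main__":
    selections = getUserChoices()
    if selections is None:
        print('No valid configuration was selected.')
    else:
        print(f'Selected configuration: {selections}')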