Skip to content
Navigation Menu
Toggle navigation
Sign in
In this repository
All GitHub Enterprise
↵
Jump to
↵
No suggested jump to results
In this repository
All GitHub Enterprise
↵
Jump to
↵
In this user
All GitHub Enterprise
↵
Jump to
↵
In this repository
All GitHub Enterprise
↵
Jump to
↵
Sign in
Reseting focus
You signed in with another tab or window.
Reload
to refresh your session.
You signed out in another tab or window.
Reload
to refresh your session.
You switched accounts on another tab or window.
Reload
to refresh your session.
Dismiss alert
{{ message }}
delriot
/
AugmentingMathematicalDataset
Public
Notifications
You must be signed in to change notification settings
Fork
0
Star
1
Code
Issues
0
Pull requests
0
Projects
0
Security
Insights
Additional navigation options
Code
Issues
Pull requests
Projects
Security
Insights
Files
main
.github
Heuristics
__init__.py
best_features
create_graphs_heuristics.py
extra_metrics.py
heuristic_tools.py
heuristics_guess.py
heuristics_rules.py
config
datasets
packages
utils
README.md
basic_ml.py
choose_hyperparams.py
create_clean_dataset.py
find_filename.py
main.py
main_heuristics.py
main_regression.py
main_reinforcement.py
make_plots.py
output.txt
preprocessing_Dorians_features.py
replicating_Dorians_features.py
requirements.txt
run_for_paper.py
test_models.py
test_train_datasets.py
train_models.py
yaml_tools.py
Breadcrumbs
AugmentingMathematicalDataset
/
Heuristics
/
extra_metrics.py
Blame
Blame
Latest commit
History
History
44 lines (36 loc) · 2.25 KB
Breadcrumbs
AugmentingMathematicalDataset
/
Heuristics
/
extra_metrics.py
Top
File metadata and controls
Code
Blame
44 lines (36 loc) · 2.25 KB
Raw
"""Study the metrics of the virtual-best and the random heuristic choices."""
from .heuristic_tools import finding_time_limit, compute_markups, compute_ncells_markup

# NOTE(review): the literal 6 below appears to be the number of candidate
# heuristics a random chooser picks among — confirm against the callers.
_NUM_CHOICES = 6


def compute_extra_metrics(heuristic, virtual_best_timings, timings,
                          number_no_timedout, useful_timings, ncells):
    """Dispatch metric computation for the given baseline heuristic.

    Parameters
    ----------
    heuristic : str
        Either 'virtual_best' or 'random'.
    virtual_best_timings : sequence of numbers
        Per-sample timings of the virtual-best choice.
    timings, number_no_timedout, useful_timings, ncells :
        Extra per-sample data, only used for the 'random' heuristic.

    Returns
    -------
    dict
        The metrics dictionary produced by the matching helper.

    Raises
    ------
    ValueError
        If `heuristic` is not one of the two supported names.
        (Previously this case crashed with an opaque UnboundLocalError.)
    """
    if heuristic == 'virtual_best':
        return compute_virtual_best_metrics(heuristic, virtual_best_timings)
    if heuristic == 'random':
        return compute_average_metrics(heuristic, virtual_best_timings, timings,
                                       number_no_timedout, useful_timings, ncells)
    raise ValueError(f"Unknown heuristic: {heuristic!r}; "
                     "expected 'virtual_best' or 'random'")


def compute_virtual_best_metrics(heuristic, virtual_best_timings):
    """Metrics of the virtual-best oracle: always right, never times out."""
    no_samples = len(virtual_best_timings)
    metrics = dict()
    metrics['name'] = 'virtual-best'
    # The oracle is correct on every sample by construction.
    metrics['accuracy'] = 1
    metrics['no_samples'], metrics['terminating'], metrics['timeouts_30'], metrics['timeouts_60'] = no_samples, no_samples, 0, 0
    # Markups are measured relative to the virtual best itself, hence zero.
    metrics['markup'], metrics['ncells_markup'] = 0, 0
    metrics['total_time'] = sum(virtual_best_timings)
    # The oracle's choice is trivially within its own top-1/2/3.
    metrics['perc_found_1'], metrics['perc_found_2'], metrics['perc_found_3'] = 1, 1, 1
    return metrics


def compute_average_metrics(heuristic, virtual_best_timings, timings,
                            number_no_timedout, useful_timings, ncells):
    """Expected metrics of a uniformly random choice among the heuristics.

    All quantities are expectations over the uniform choice: accuracy is
    1/_NUM_CHOICES, timings are per-sample means, and timeout counts are
    weighted by the per-sample timeout probability.
    """
    no_samples = len(virtual_best_timings)
    metrics = dict()
    metrics['name'] = 'random'
    metrics['no_samples'] = no_samples
    metrics['accuracy'] = 1 / _NUM_CHOICES
    # NOTE(review): despite its name, `number_no_timedout` is divided by the
    # number of choices to get a per-sample timeout probability — verify the
    # caller supplies the count of timed-out choices here.
    prob_timeouts = [pos_timeout / _NUM_CHOICES for pos_timeout in number_no_timedout]
    metrics['terminating'] = no_samples - sum(prob_timeouts)
    # Expected number of timeouts, split by the sample's time limit (30s / 60s).
    metrics['timeouts_30'] = sum(prob_timeout for prob_timeout, timing
                                 in zip(prob_timeouts, timings)
                                 if finding_time_limit(timing) == 30)
    metrics['timeouts_60'] = sum(prob_timeout for prob_timeout, timing
                                 in zip(prob_timeouts, timings)
                                 if finding_time_limit(timing) == 60)
    # Mean timing per sample over the choices that produced a useful timing.
    expected_timings = [sum(useful_timing) / len(useful_timing)
                        for useful_timing in useful_timings]
    metrics['markup'] = compute_markups(virtual_best_timings, expected_timings)
    # Non-numeric cell counts (strings, presumably failure markers) are
    # penalised with the constant 10 — TODO confirm this sentinel value.
    metrics['ncells_markup'] = sum(
        sum(elem if not isinstance(elem, str) else 10 for elem in ex_ncells) / len(ex_ncells)
        for ex_ncells in ncells
    ) / len(ncells)
    metrics['total_time'] = sum(expected_timings)
    # Probability the random pick lands in the top 1, 2, or 3 choices.
    metrics['perc_found_1'], metrics['perc_found_2'], metrics['perc_found_3'] = 1/6, 2/6, 3/6
    return metrics
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
You can’t perform that action at this time.