delriot/AugmentingMathematicalDataset
main_heuristics.py · 100 lines (91 loc) · 3.92 KB
import csv
import math
import pickle
import random
# import numpy as np
from Heuristics.heuristics_guess import not_greedy_heuristic_guess
from Heuristics.heuristics_guess import ordering_given_projections
# from find_filename import find_dataset_filename
# from test_models import compute_metrics
# from config.ml_models import heuristics

random.seed(0)
nvar = 3
testing_method = 'Biased'


# # TESTING GMODS IN AUGMENTED: Features 2, 67 and 132
# def choose_gmods(features):
#     # Return one of the six ordering indices (0-5), determined by the
#     # relative order of the three feature values (smallest first).
#     a = []
#     # print(features)
#     a.append(features[2])
#     a.append(features[67])
#     a.append(features[132])
#     if a[0] == min(a):
#         if a[1] <= a[2]:
#             return 0
#         else:
#             return 1
#     elif a[1] == min(a):
#         if a[0] <= a[2]:
#             return 2
#         else:
#             return 3
#     elif a[2] == min(a):
#         if a[0] <= a[1]:
#             return 4
#         else:
#             return 5


def ordering_choices_heuristics(heuristic, testing_dataset, paradigm):
    """Return the ordering chosen by `heuristic` for each testing instance."""
    if heuristic == 'virtual-best':
        # The labels are the optimal choices by definition.
        chosen_indices = testing_dataset['labels']
    elif heuristic == 'random':
        chosen_indices = [random.randint(0, len(timings) - 1)
                          for timings in testing_dataset['timings']]
    elif paradigm == 'Greedy':
        chosen_indices = [ordering_given_projections(projection, heuristic)
                          for projection in testing_dataset['projections']]
    elif paradigm == 'NotGreedy':
        chosen_indices = [not_greedy_heuristic_guess(polynomials, heuristic)
                          for polynomials in testing_dataset['polynomials']]
    else:
        raise Exception(f"Paradigm {paradigm} not recognised for a heuristic.")
    return chosen_indices


# if __name__ == "__main__":
#     test_dataset_filename = find_dataset_filename('Test', testing_method)
#     with open(test_dataset_filename, 'rb') as test_dataset_file:
#         testing_dataset = pickle.load(test_dataset_file)
#     output_file = "heuristics_output_acc_time.csv"
#     # Testing heuristics that make all their choices at once
#     first_heuristic = 1
#     for paradigm in ['Greedy', 'NotGreedy']:
#         for heuristic in heuristics:
#             # for heuristic in ['gmods', 'virtual-best']:
#             reps = 100
#             for i in range(reps):
#                 chosen_indices = ordering_choices_heuristics(heuristic,
#                                                              testing_dataset,
#                                                              paradigm)
#                 metrics = compute_metrics(chosen_indices, testing_dataset)
#                 if i == 0:
#                     sum_metrics = metrics
#                 else:
#                     sum_metrics = {key: metrics[key] + sum_metrics[key]
#                                    for key in metrics}
#             avg_metrics = {key: sum_metrics[key]/reps
#                            for key in sum_metrics}
#             # Accuracy and Markup are kept as plain averages; every other
#             # metric is scaled by nvar!, the number of possible orderings.
#             augmented_metrics = {key: avg_metrics[key]
#                                  if key in ['Accuracy', 'Markup']
#                                  else math.factorial(nvar)*avg_metrics[key]
#                                  for key in sum_metrics}
#             print(paradigm + '-' + heuristic, augmented_metrics)
#             if first_heuristic == 1:
#                 # Write the CSV header once, using the metric names as columns.
#                 first_heuristic = 0
#                 keys = list(augmented_metrics.keys())
#                 with open(output_file, 'a') as f:
#                     f.write(', '.join(['Model'] + keys) + '\n')
#             with open(output_file, 'a', newline='') as f:
#                 writer = csv.writer(f)
#                 writer.writerow([paradigm + '-' + heuristic]
#                                 + [augmented_metrics[key] for key in keys])
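Below is a minimal usage sketch of ordering_choices_heuristics, assuming the repository's Heuristics package is importable. The toy dataset here is hypothetical, but its keys ('labels', 'timings', 'projections', 'polynomials') mirror the ones the function reads; the 'virtual-best' and 'random' heuristics only touch 'labels' and 'timings', so the other fields can stay empty.

# Usage sketch with hypothetical data; requires main_heuristics.py and the
# Heuristics package to be on the Python path.
from main_heuristics import ordering_choices_heuristics

toy_dataset = {
    'labels': [2, 0, 5],                # per-instance optimal ordering index
    'timings': [[1.2, 0.8], [0.5, 0.9, 0.3], [2.0, 1.1]],
    'projections': [],                  # read only under the 'Greedy' paradigm
    'polynomials': [],                  # read only under the 'NotGreedy' paradigm
}

# 'virtual-best' simply echoes the labels.
print(ordering_choices_heuristics('virtual-best', toy_dataset, 'Greedy'))
# -> [2, 0, 5]

# 'random' draws one index per instance, bounded by that instance's timings.
print(ordering_choices_heuristics('random', toy_dataset, 'Greedy'))
# -> e.g. [1, 2, 0]

Running a named heuristic such as 'gmods' would additionally need genuine entries in 'projections' (Greedy) or 'polynomials' (NotGreedy), since those branches delegate to ordering_given_projections and not_greedy_heuristic_guess.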