#written by Viktor Zenkov in 2018
#this file contains functions for reading in the training data, processing it, and splitting it into training and test sets
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from data_utils import get_file
from sequence import _remove_long_seq
import numpy as np
import json
import warnings
import os
import math
import time
#this function loads the data.
#path is "where to cache the data (relative to `~/.keras/dataset`)"
#num_words is the number of unique words to keep (words with larger indices become oov_char)
#skip_top is the number of most frequent words to skip (for example, if "the" were the most common word in a context, this could be set to skip "the")
#maxlen is the maximum sequence length; files longer than maxlen are dropped entirely
#seed is the random seed used for shuffling the data
#start_char is the integer inserted at the beginning of each sequence (file)
#oov_char is the integer that replaces words dropped because their index falls outside [skip_top, num_words)
#index_from is the amount added to all the word integers to make room for the start_char and oov_char integers
#for example, with the defaults start_char=1 and index_from=3, the raw sequence [5, 7] becomes [1, 8, 10]
#numOrHex determines whether we read the text (True) or hex (False) integer data
#useful parameters to alter are num_words, maxlen, seed, and numOrHex
def load_data(path='imdb.npz', num_words=None, skip_top=0,
              maxlen=None, seed=113,
              start_char=1, oov_char=2, index_from=3, numOrHex=True, **kwargs):
    # Legacy support
    if 'nb_words' in kwargs:
        warnings.warn('The `nb_words` argument in `load_data` '
                      'has been renamed `num_words`.')
        num_words = kwargs.pop('nb_words')
    if kwargs:
        raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
    #if we want to use only the first numFiles files, set limitNumFiles to True
    numFiles = 100
    limitNumFiles = False
    #the paths of the text or hex integer files
    if numOrHex:
        f_path = '/asmtextOutputIntegers'
    else:
        f_path = '/asmhexOutputIntegers'
    #get the files in sorted order and keep only .txt files
    allFileNames = sorted(os.listdir(f_path))
    allFileNames = [f_name for f_name in allFileNames if f_name.endswith('.txt')]
    xs = []
    print("starting reading files at ", time.time())
    counter = 0
    #read each file and put its contents into an element of xs
    for f_name in allFileNames:
        #small number of files code:
        counter += 1
        if limitNumFiles and counter > numFiles:
            break
        #latin-1 decodes arbitrary bytes without errors; the with block closes the file
        with open(f_path + '/' + f_name, encoding='latin-1') as f:
            xs.append(f.read())
    print("finished reading files at ", time.time())
    #read in the file labels and create a dictionary mapping file names to labels
    #each line of trainLabels.csv has a quoted file name, a comma, and a label
    dictionaryLabels = {}
    with open('trainLabels.csv', 'r') as dicFile:
        for lineEntry in dicFile:
            bothWords = lineEntry.split(',')
            #strip the quotes from the name and the trailing newline from the label
            dictionaryLabels[bothWords[0][1:-1]] = bothWords[1][:-1]
    labels = []
    counter = 0
    #create a labels array that matches the xs array
    for f_name in allFileNames:
        #small number of files code:
        counter += 1
        if limitNumFiles and counter > numFiles:
            break
        #the slice lengths remove "Numbers.txt" or "hexIntegers.txt" from the end of the file names
        if numOrHex:
            labels.append(dictionaryLabels[f_name[:-11]])
        else:
            labels.append(dictionaryLabels[f_name[:-15]])
    print("starting quantifying files at ", time.time())
    #turn each file's string into a list of integers
    xs = list(list(int(w) for w in xelem.split()) for xelem in xs)
    #use object dtype because the sequences have different lengths
    xs = np.array(xs, dtype=object)
    #turn the label strings into integers
    labels = list(int(w) for w in labels)
    labels = np.array(labels)
    #shuffle the xs and labels arrays with the same permutation
    np.random.seed(seed)
    indices = np.arange(len(xs))
    np.random.shuffle(indices)
    xs = xs[indices]
    labels = labels[indices]
    #add a start character to the beginning of each file's array and increase all the file integers by index_from
    if start_char is not None:
        xs = [[start_char] + [w + index_from for w in x] for x in xs]
    elif index_from:
        xs = [[w + index_from for w in x] for x in xs]
    #drop files that are longer than maxlen
    if maxlen:
        xs, labels = _remove_long_seq(maxlen, xs, labels)
        if not xs:
            raise ValueError('After filtering for sequences shorter than maxlen=' +
                             str(maxlen) + ', no sequence was kept. '
                             'Increase maxlen.')
    if not num_words:
        num_words = max([max(x) for x in xs])
    # by convention, use 2 as OOV word
    # reserve 'index_from' (=3 by default) characters:
    # 0 (padding), 1 (start), 2 (OOV)
    if oov_char is not None:
        xs = [[w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs]
    else:
        xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
    #put 20% of the files into the test data and use the rest as training data
    idx = math.floor(len(xs) * 8 / 10)
    x_train, y_train = np.array(xs[:idx], dtype=object), np.array(labels[:idx])
    x_test, y_test = np.array(xs[idx:], dtype=object), np.array(labels[idx:])
    print("finished quantifying files at ", time.time())
    return (x_train, y_train), (x_test, y_test)
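
#A minimal usage sketch (added for illustration; not part of the original
#pipeline). example_load is a hypothetical helper, the parameter values below
#are illustrative rather than tuned, and calling it assumes the integer files
#and trainLabels.csv described above are present.
def example_load():
    (x_train, y_train), (x_test, y_test) = load_data(num_words=10000,
                                                     seed=113,
                                                     numOrHex=True)
    print('training files:', len(x_train), 'test files:', len(x_test))
    return (x_train, y_train), (x_test, y_test)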

#this function is not currently used
def get_word_index(path='imdb_word_index.json'):
    """Retrieves the dictionary mapping words to word indices.

    # Arguments
        path: where to cache the data (relative to `~/.keras/dataset`).

    # Returns
        The word index dictionary.
    """
    # path = get_file(path,
    #                 origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json',
    #                 file_hash='bfafd718b763782e994055a2d397834f')
    path = "imdb_word_index.json"
    with open(path) as f:
        return json.load(f)
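
#A hedged decoding sketch (added for illustration): if a word index in the
#format returned by get_word_index were built for this data, sequences
#produced by load_data could be turned back into words like this, assuming
#the default conventions above (0 = padding, 1 = start, 2 = OOV, real word
#integers shifted up by index_from). decode_sequence is a hypothetical
#helper, not part of the original file.
def decode_sequence(sequence, index_from=3):
    word_index = get_word_index()
    #invert word -> index into (index + index_from) -> word to undo the shift
    inverted = {v + index_from: k for k, v in word_index.items()}
    inverted[0] = '<pad>'
    inverted[1] = '<start>'
    inverted[2] = '<oov>'
    return ' '.join(inverted.get(w, '<oov>') for w in sequence)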