forked from test-v1/uob-summer-project
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathpreprocess_code.py
More file actions
443 lines (337 loc) · 13.2 KB
/
preprocess_code.py
File metadata and controls
443 lines (337 loc) · 13.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
"""
Module with utility functions for working with and preprocessing
source code.
Explanation:
## Guide of preprocess_code
Most of our code for preprocessing is in this file `preprocess_code.py`.
### The usage of clang
In our project, we need to get ast tree for every C/C++ source code. This package `clang.cindex` provides us a way to generate `ast_root` from which we can walk all the children nodes in this ast tree.
### How to use the tree
When we get the tree, we want to number each tree in order to get an edgelist or something.
But every time we run `get_children` for our nodes, it returns a new object which will lose information. So you can use `concretise_ast` to solve this problem.
After that, we can number each node by using `number_ast_nodes` (starting with `counter = 1`). It is then easy to generate an edgelist by using the `generate_edgelist` function, which walks the graph and generates the edgelist from the `ast_root` produced by `clang` earlier.
### Graph2vec
### Dask
Since the data size is large, we use dask in our function named `preprocess_all_for_graph2vec` to process it in separate partitions.
## Node2vec
Node2vec requires an edgelist for each AST. By simply removing the features we get the function named `process_for_node2vec`, which can be used in `preprocess_all_for_adjmatrix` to get the adjacency matrix and in `preprocess_all_for_node2vec`.
### Edgelist
The outputs of all the edgelists are saved into other git repo named [`uob-summer-project-node2vec`](https://github.com/xihajun/uob-summer-project-node2vec)
## Adjacency and Feature Matrices
The adjacency matrix is saved into the file `../data/adj.pickle`.
Our adjacency matrices are, for some reason, undirected; to fix this issue, we just need to discard the upper-triangle values.
The feature matrix is saved into the file `../data/feature_matrix.pickle`.
Our feature matrices have columns that represent properties of a node and each node is one-hot encoded (with each row representing a node).
"""
import clang.cindex
import gc
import json
import os
import subprocess
import swifter
import tempfile
import dask.dataframe as dd
import numpy as np
import pandas as pd
from tqdm import tqdm
# Point libclang at the LLVM 7 shared library so clang.cindex can load it.
# NOTE(review): this path is machine-specific — adjust or remove it if your
# environment already exposes libclang on the default loader search path.
clang.cindex.Config.set_library_file(
    '/usr/lib/llvm-7/lib/libclang-7.so.1'
)
def snap_graph_from_clang_ast(ast_root):
    """
    Convert a concretised, numbered clang AST into a snap-python directed graph.

    Precondition: concretise_ast(ast_root) and number_ast_nodes(ast_root)
    have already been run, so every node carries .children and .identifier.

    Returns a snap.TNGraph whose node ids are the AST node identifiers and
    whose directed edges run parent -> child.
    https://github.com/snap-stanford/snap-python
    """
    # Fix: `snap` was referenced but never imported anywhere in this module,
    # so every call raised NameError. Import lazily here so the rest of the
    # module keeps working when snap-python is not installed.
    import snap

    graph = snap.TNGraph.New()

    def walk_ast_and_construct_graph(node):
        graph.AddNode(node.identifier)
        for child in node.children:
            # Recurse first (adds the child node), then add the edge —
            # same order as the original implementation.
            walk_ast_and_construct_graph(child)
            graph.AddEdge(node.identifier, child.identifier)

    walk_ast_and_construct_graph(ast_root)
    return graph
def concretise_ast(node):
    """
    Recursively materialise the clang AST under `node`.

    Every call to .get_children() on a clang cursor yields fresh cursor
    objects, so attributes set on them are lost the next time the tree is
    walked. To avoid that, walk the tree once and store the result of
    .get_children() in a concrete list on .children; subsequent walks over
    .children then see the same objects every time.

    Mutates the tree in place; returns None.
    """
    node.children = list(node.get_children())
    for child in node.children:
        # Fix: the original assigned the (always-None) return value to a
        # stray `counter` variable; the recursion is purely a side effect.
        concretise_ast(child)
def number_ast_nodes(node, counter=1):
    """
    Assign each node of a concretised clang AST a unique integer id.

    Ids are stored on the .identifier attribute, assigned in pre-order
    starting from `counter` (1 by default).

    Precondition: concretise_ast(node) has been run, so .children exists
    and holds stable objects. Returns the next unused counter value.
    """
    node.identifier = counter
    counter += 1
    # Fix: the original re-ran `node.children = list(node.get_children())`
    # here, replacing the children concretise_ast had already stored with
    # brand-new cursor objects — exactly the instability concretise_ast
    # exists to prevent. Walk the already-concretised .children instead.
    for child in node.children:
        counter = number_ast_nodes(child, counter)
    return counter
def get_sub_tree(node):
    """
    Search the numbered clang AST for the node whose .identifier equals 1
    and return it; the sub-tree rooted there corresponds to the source
    code file itself. Returns None when no such node exists under `node`.
    """
    # Guard clause: found the target, no need to descend further.
    if node.identifier == 1:
        return node
    # Depth-first search; propagate the first hit back up the stack.
    for child in node.children:
        found = get_sub_tree(child)
        if found is not None:
            return found
    return None
def get_flaw_num(node, flaw, bad_label):
    """
    Walk the (concretised, numbered) sub-tree rooted at `node` and append
    to `bad_label` the .identifier of every node whose source line equals
    `flaw`.

    Mutates `bad_label` in place; returns None.
    """
    # Fix: the original duplicated the identical recursion in both the
    # `if` and the `else` branch; only the append is actually conditional.
    if node.location.line == flaw:
        bad_label.append(node.identifier)
    for child in node.children:
        get_flaw_num(child, flaw, bad_label)
def generate_edgelist(ast_root):
    """
    Return the parent -> child edges of a concretised & numbered clang AST
    as a list of [parent_id, child_id] pairs, in depth-first pre-order:

        [
            [<start_node_id>, <end_node_id>],
            ...
        ]
    """
    edges = []

    def visit(parent):
        # Record the edge to each child before descending into it, so the
        # ordering matches a pre-order walk of the tree.
        for child in parent.children:
            edges.append([parent.identifier, child.identifier])
            visit(child)

    visit(ast_root)
    return edges
def generate_features(ast_root):
    """
    Build the per-node feature dictionary of a concretised & numbered
    clang AST:

        {
            <node_id>: [<degree>, <kind string>, <display name>],
            ...
        }

    NOTE(review): every node is credited with an in-degree of 1 — including
    the root, whose true in-degree is 0. Kept as-is to match the original
    behaviour; confirm whether that is intended downstream.
    """
    features = {}

    def visit(node):
        # degree = number of children (out) + the assumed single parent (in)
        features[node.identifier] = [
            len(node.children) + 1,
            str(node.kind),
            node.displayname,
        ]
        for child in node.children:
            visit(child)

    visit(ast_root)
    return features
def process_for_graph2vec(testcase, **kwargs):
    """
    Preprocess one testcase (the rows of juliet.csv.zip or vdisc_*.csv.gz
    belonging to a single testcase, loaded with pandas) into the JSON
    document graph2vec expects: {"edges": [...], "features": {...}}.
    """
    unsaved_files = [(row.filename, row.code) for row in testcase.itertuples()]
    primary = find_primary_source_file(testcase)

    # Parse the in-memory sources with clang and grab the AST root cursor.
    index = clang.cindex.Index.create()
    translation_unit = index.parse(
        path=primary.filename,
        unsaved_files=unsaved_files,
    )
    ast_root = translation_unit.cursor

    # Pin down the children lists so repeated walks see the same objects,
    # then give every node a unique identifier.
    concretise_ast(ast_root)
    number_ast_nodes(ast_root)

    # Edge list plus per-node features form the graph2vec input document.
    representation = {
        "edges": generate_edgelist(ast_root),
        "features": generate_features(ast_root),
    }

    # Drop the clang objects explicitly to keep memory usage in check.
    del translation_unit
    del ast_root
    del index

    return json.dumps(representation)
def process_for_node2vec(testcase, **kwargs):
    """
    Preprocess one testcase (the rows of juliet.csv.zip or vdisc_*.csv.gz
    belonging to a single testcase, loaded with pandas) and return only
    the AST edge list, which is all node2vec needs.
    """
    unsaved_files = [(row.filename, row.code) for row in testcase.itertuples()]
    primary = find_primary_source_file(testcase)

    # Parse the in-memory sources with clang and grab the AST root cursor.
    index = clang.cindex.Index.create()
    translation_unit = index.parse(
        path=primary.filename,
        unsaved_files=unsaved_files,
    )
    ast_root = translation_unit.cursor

    # Concretise then number so the edge list refers to stable node ids.
    concretise_ast(ast_root)
    number_ast_nodes(ast_root)

    edgelist = generate_edgelist(ast_root)

    # Drop the clang objects explicitly to keep memory usage in check.
    del translation_unit
    del ast_root
    del index

    return edgelist
def process_for_node2vec_label(testcase, **kwargs):
    """
    For one testcase, return the identifiers of every AST node that sits
    on the flawed source line (the first flaw_loc of the testcase),
    restricted to the sub-tree whose root has identifier 1.
    """
    unsaved_files = [(row.filename, row.code) for row in testcase.itertuples()]
    flaw_lines = [row.flaw_loc for row in testcase.itertuples()]
    primary = find_primary_source_file(testcase)

    # Parse the in-memory sources with clang and grab the AST root cursor.
    index = clang.cindex.Index.create()
    translation_unit = index.parse(
        path=primary.filename,
        unsaved_files=unsaved_files,
    )
    ast_root = translation_unit.cursor

    # Concretise, number, then narrow to the source-file sub-tree.
    concretise_ast(ast_root)
    number_ast_nodes(ast_root)
    ast_root = get_sub_tree(ast_root)

    # Collect the ids of every node located on the flaw line.
    bad_label = []
    get_flaw_num(ast_root, flaw_lines[0], bad_label)

    # Drop the clang objects explicitly to keep memory usage in check.
    del translation_unit
    del ast_root
    del index

    return bad_label
def generate_ast_roots(testcase, **kwargs):
    """
    Parse one Juliet testcase (rows of juliet.csv.zip loaded with pandas)
    with clang and return its AST root cursor, already concretised and
    numbered, ready for feature-matrix extraction.
    """
    unsaved_files = [(row.filename, row.code) for row in testcase.itertuples()]
    primary = find_primary_source_file(testcase)

    # Parse the in-memory sources with clang and grab the AST root cursor.
    index = clang.cindex.Index.create()
    translation_unit = index.parse(
        path=primary.filename,
        unsaved_files=unsaved_files,
    )
    ast_root = translation_unit.cursor

    # Stabilise the children lists, then number every node uniquely.
    concretise_ast(ast_root)
    number_ast_nodes(ast_root)
    return ast_root
def find_primary_source_file(datapoints):
    """
    Pick the "primary" file among the datapoints of a single testcase.

    Per the Juliet documentation this is the one file that defines the
    main function; the vdisc dataset only ever has one file per testcase.

    Returns the single row (one datapoint), the itertuples row whose code
    defines main (several datapoints), the first row as a fallback when no
    main is found, or None for an empty frame.
    """
    if len(datapoints) == 1:
        # vdisc, and the single-file Juliet testcases.
        return datapoints.iloc[0]
    if len(datapoints) > 1:
        # Multi-file Juliet testcase: find the file defining main().
        for row in datapoints.itertuples():
            defines_main = any(
                line.startswith("int main(")
                for line in row.code.split("\n")
            )
            if defines_main:
                return row
        return datapoints.iloc[0]
def preprocess_all_for_graph2vec(csv_location, output_location, num_partitions=20):
    """
    Load a dataset CSV (e.g. juliet.csv.zip or vdisc_*.csv.gz), run
    process_for_graph2vec over every testcase group with dask, and write
    one JSON file per testcase into <output_location>/graph2vec_input/.

    Returns the path of the directory the JSON files were written into.
    """
    print("Preprocess our code so it can be used as an input into graph2vec.")
    data = pd.read_csv(csv_location)
    # Partition across dask workers because the dataset is large.
    data = dd.from_pandas(data, npartitions=num_partitions)
    graphs = data.groupby(['testcase_ID']).apply(
        process_for_graph2vec,
        # NOTE(review): groupby().apply() has no `axis` parameter — this is
        # forwarded into process_for_graph2vec's **kwargs and ignored there;
        # confirm it can simply be dropped.
        axis='columns',
        meta=('processed_for_graph2vec', 'unicode'),
    )
    print("`-> Finished prepping data for graph2vec.")
    # print("Dataset pre-processed for graph2vec. Saving to file:")
    # graphs.to_csv(tmp_directory.name + "/juliet_ready_for_graph2vec.csv.gz")
    # print("`-> Saved.")
    print("Making a temporary directory to put our graph2vec inputs into.")
    graph2vec_input_dir = output_location + "/graph2vec_input/"
    os.makedirs(graph2vec_input_dir, exist_ok=True)
    print("Save the graph2vec input into a file for each datapoint:")
    # NOTE(review): iterating the dask result with iteritems() presumably
    # triggers computation of the whole series; also iteritems() was removed
    # in pandas 2.x — verify against the pinned dask/pandas versions.
    for index, row in graphs.iteritems():
        print("Current Iteration: "+str(index))
        # `row` is the JSON string produced by process_for_graph2vec.
        with open(graph2vec_input_dir + str(index) + ".json", 'w') as f:
            f.write(row)
    print("`-> Done.")
    return graph2vec_input_dir
def run_graph2vec(input_dir, output_location, num_graph2vec_workers=1):
    """
    Invoke the graph2vec reference implementation as a subprocess over the
    JSON inputs in `input_dir`, writing the embeddings to `output_location`.
    """
    print("Runs graph2vec on each of the above datapoints")
    # Build the argument vector up front; list form avoids shell quoting.
    command = [
        "python3",
        "/graph2vec/src/graph2vec.py",
        "--workers",
        str(num_graph2vec_workers),
        "--input-path",
        input_dir,
        "--output-path",
        output_location,
    ]
    subprocess.run(command)
    print("`-> Done.")
if __name__=="__main__":
    juliet = pd.read_csv("../data/juliet.csv.zip")
    # Fix: the original called convert_to_graph2vec(), which is not defined
    # anywhere in this module and raised NameError. Use process_for_graph2vec
    # instead, which expects all rows of one testcase (cf. the
    # groupby('testcase_ID') in preprocess_all_for_graph2vec).
    first_testcase_id = juliet['testcase_ID'].iloc[0]
    example = juliet[juliet['testcase_ID'] == first_testcase_id]
    preprocessed_example = process_for_graph2vec(example)
    print("# Welcome ---------------------------------- #\n"
          "Loaded in the first datapoint from juliet, and \n"
          "preprocessed it for the baseline model. The \n "
          "original is named 'example' and the output is \n"
          "named 'preprocessed_example'. \n"
          "Take a look!")
    # Deliberate drop into the debugger so the user can inspect the result.
    import pdb; pdb.set_trace()