Hello,
Thank you for sharing your KPConv code! I'd like to ask whether it is possible to add more features to the computation (i.e., XYZ, RGB, intensity or reflectance, number of returns, return number). I do not know if I am thinking about this correctly, but the changes should be done in datasets/Semantic3D.py (or another dataset file) at these lines:
Lines 214 to 236 in 16bfbb9
data = np.loadtxt(txt_file)

points = data[:, :3].astype(np.float32)
colors = data[:, 4:7].astype(np.uint8)

if exists(label_file):
    # Load labels
    labels = np.loadtxt(label_file, dtype=np.int32)

    # Subsample to save space
    sub_points, sub_colors, sub_labels = grid_subsampling(points,
                                                          features=colors,
                                                          labels=labels,
                                                          sampleDl=0.01)

    # Write the subsampled ply file
    write_ply(ply_file_full, (sub_points, sub_colors, sub_labels), ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

else:
    # Write the full ply file
    write_ply(ply_file_full, (points, colors), ['x', 'y', 'z', 'red', 'green', 'blue'])
by changing the structure of the data file to XYZ, RGB, other values, and then loading it like:
data = np.loadtxt(txt_file)
points = data[:, :3].astype(np.float32)
colors = data[:, 3:6].astype(np.uint8)
**addvalues = data[:, 6:9].astype(np.float32)**
and
    sub_points, sub_colors, **sub_prop**, sub_labels = grid_subsampling(points,
                                                                        features=colors,
                                                                        **prop=addvalues,**
                                                                        labels=labels,
                                                                        sampleDl=0.01)

    # Write the subsampled ply file
    write_ply(ply_file_full, (sub_points, sub_colors, sub_prop, sub_labels),
              ['x', 'y', 'z', 'red', 'green', 'blue', **'reflectance', 'noreturn', 'returnno'**, 'class'])

else:
    # Write the full ply file
    write_ply(ply_file_full, (points, colors, **addvalues**),
              ['x', 'y', 'z', 'red', 'green', 'blue', **'reflectance', 'noreturn', 'returnno'**])
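Or, instead of adding a prop argument to grid_subsampling (which I suppose would also mean touching the C++ wrapper), maybe the extra columns could simply be stacked onto the colors as one wider features array and split again afterwards. This is only a rough sketch and assumes the wrapper averages feature arrays of any width (I have not checked the C++ side) and that the casts are acceptable:

data = np.loadtxt(txt_file)
points = data[:, :3].astype(np.float32)
colors = data[:, 3:6].astype(np.float32)
addvalues = data[:, 6:9].astype(np.float32)

# One features array: columns 0-2 = RGB, columns 3-5 = the extra attributes
features = np.hstack((colors, addvalues))

sub_points, sub_features, sub_labels = grid_subsampling(points,
                                                        features=features,
                                                        labels=labels,
                                                        sampleDl=0.01)
sub_colors = sub_features[:, :3].astype(np.uint8)
sub_prop = sub_features[:, 3:]

# Write the subsampled ply file with the extra fields
write_ply(ply_file_full, (sub_points, sub_colors, sub_prop, sub_labels),
          ['x', 'y', 'z', 'red', 'green', 'blue', 'reflectance', 'noreturn', 'returnno', 'class'])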
then in
Lines 288 to 315 in 16bfbb9
    # read ply with data
    data = read_ply(sub_ply_file)
    sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T

    if cloud_split == 'test':
        sub_labels = None
    else:
        sub_labels = data['class']

    # Read pkl with search tree
    with open(KDTree_file, 'rb') as f:
        search_tree = pickle.load(f)

else:
    # Read ply file
    data = read_ply(file_path)
    points = np.vstack((data['x'], data['y'], data['z'])).T
    colors = np.vstack((data['red'], data['green'], data['blue'])).T

    if cloud_split == 'test':
        int_features = None
    else:
        int_features = data['class']

    # Subsample cloud
    sub_data = grid_subsampling(points,
                                features=colors,
                                labels=int_features,
                                sampleDl=subsampling_parameter)
one line should be added:
data = read_ply(sub_ply_file)
sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
**sub_prop = np.vstack((data['reflectance'], data['noreturn'], data['returnno'])).T**
and also in the else branch:
else:
# Read ply file
data = read_ply(file_path)
points = np.vstack((data['x'], data['y'], data['z'])).T
colors = np.vstack((data['red'], data['green'], data['blue'])).T
**addvalues = np.vstack((data['reflectance'], data['noreturn'], data['returnno'])).T**
if cloud_split == 'test':
and in the subsampling call:
# Subsample cloud
sub_data = grid_subsampling(points,
                            features=colors,
                            **prop=addvalues,**
                            labels=int_features,
                            sampleDl=subsampling_parameter)
**But how about rescaling?**

# Rescale float color and squeeze label
sub_prop = sub_data[2] / 50  # if the reflectance is used?
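What I had in mind for the rescaling (just a sketch; unlike RGB there is no fixed 0-255 range for these attributes, so the divisors below are pure assumptions that would have to match the actual sensor data):

# Rescale float color and squeeze label (as in the original code)
sub_colors = sub_data[1] / 255

# Bring the extra attributes roughly into [0, 1]; assumed ranges:
# reflectance 0-50, number of returns and return number 1-5
prop_ranges = np.array([50.0, 5.0, 5.0], dtype=np.float32)
sub_prop = sub_data[2] / prop_ranges

Or would it be better to normalize per cloud, e.g. by the column maxima?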
The next step is to change lines:
Lines 327 to 337 in 16bfbb9
# Save ply
if cloud_split == 'test':
    sub_labels = None
    write_ply(sub_ply_file,
              [sub_data[0], sub_colors],
              ['x', 'y', 'z', 'red', 'green', 'blue'])
else:
    sub_labels = np.squeeze(sub_data[2])
    write_ply(sub_ply_file,
              [sub_data[0], sub_colors, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
like:
# Save ply
if cloud_split == 'test':
    sub_labels = None
    write_ply(sub_ply_file,
              [sub_data[0], sub_colors, sub_prop],
              ['x', 'y', 'z', 'red', 'green', 'blue', **'reflectance', 'noreturn', 'returnno'**])
else:
    sub_labels = np.squeeze(sub_data[**3**])
    write_ply(sub_ply_file,
              [sub_data[0], sub_colors, sub_prop, sub_labels],
              ['x', 'y', 'z', 'red', 'green', 'blue', **'reflectance', 'noreturn', 'returnno'**, 'class'])
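To convince myself that write_ply/read_ply handle the extra float fields, I was thinking of a small round-trip check first (standalone sketch, the file name is made up):

import numpy as np
from utils.ply import write_ply, read_ply

# Tiny synthetic cloud: 4 points with colors and three extra attributes
pts = np.random.rand(4, 3).astype(np.float32)
cols = (np.random.rand(4, 3) * 255).astype(np.uint8)
prop = np.random.rand(4, 3).astype(np.float32)

write_ply('test_extra_fields.ply',
          [pts, cols, prop],
          ['x', 'y', 'z', 'red', 'green', 'blue', 'reflectance', 'noreturn', 'returnno'])

data = read_ply('test_extra_fields.ply')
back = np.vstack((data['reflectance'], data['noreturn'], data['returnno'])).T
print(back.shape, back.dtype)  # expecting (4, 3) float32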
and:
Lines 339 to 343 in 16bfbb9
# Fill data containers
self.input_trees[cloud_split] += [search_tree]
self.input_colors[cloud_split] += [sub_colors]
if cloud_split in ['training', 'validation']:
    self.input_labels[cloud_split] += [sub_labels]
# Fill data containers
self.input_trees[cloud_split] += [search_tree]
self.input_colors[cloud_split] += [sub_colors]
**self.input_addvalues[cloud_split] += [sub_prop]**
if cloud_split in ['training', 'validation']:
    self.input_labels[cloud_split] += [sub_labels]
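I suppose the new container also has to be created wherever the existing ones are initialized (I think near the top of load_subsampled_clouds), mirroring input_colors:

# New dict with the same split keys as self.input_colors
self.input_addvalues = {'training': [], 'validation': [], 'test': []}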
then in line:
Lines 581 to 585 in 16bfbb9
# Collect points and colors
input_points = (points[input_inds] - pick_point).astype(np.float32)
input_colors = self.input_colors[split][cloud_ind][input_inds]
input_labels = self.input_labels[split][cloud_ind][input_inds]
input_labels = np.array([self.label_to_idx[l] for l in input_labels])
# Collect points and colors
input_points = (points[input_inds] - pick_point).astype(np.float32)
input_colors = self.input_colors[split][cloud_ind][input_inds]
**input_addvalues = self.input_addvalues[split][cloud_ind][input_inds]**
input_labels = self.input_labels[split][cloud_ind][input_inds]
input_labels = np.array([self.label_to_idx[l] for l in input_labels])
in
Line 606 in 16bfbb9
c_list += [np.hstack((input_colors, input_points + pick_point))]
should become:

**c_list += [np.hstack((input_colors, input_addvalues, input_points + pick_point))]**
in
Lines 675 to 677 in 16bfbb9
# Collect points and colors
input_points = (points[input_inds] - pick_point).astype(np.float32)
input_colors = self.input_colors[data_split][cloud_ind][input_inds]
# Collect points and colors
input_points = (points[input_inds] - pick_point).astype(np.float32)
input_colors = self.input_colors[data_split][cloud_ind][input_inds]
**input_addvalues = self.input_addvalues[data_split][cloud_ind][input_inds]**
and in
Line 703 in 16bfbb9
c_list += [np.hstack((input_colors, input_points + pick_point))]
should become:

**c_list += [np.hstack((input_colors, input_addvalues, input_points + pick_point))]**
Do the generated types and shapes have to be changed?
Lines 736 to 738 in 16bfbb9
# Define generated types and shapes
gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, 6], [None], [None], [None], [None])
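My guess, depending on how the addvalues travel (both variants are just assumptions on my side):

# Variant 1: addvalues are hstacked into the same tensor as the colors
# (as in my c_list change above), so only the width changes from 6 to 9
gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)
gen_shapes = ([None, 3], [None, 9], [None], [None], [None], [None])

# Variant 2: a separate stacked_addvalues tensor is yielded by the generator
# (matching the tf_map signature below); then an extra entry is needed and the
# generator itself would also have to yield the additional array
# gen_types = (tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)
# gen_shapes = ([None, 3], [None, 6], [None, 3], [None], [None], [None], [None])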
and the last part:
Lines 745 to 792 in 16bfbb9
def tf_map(stacked_points, stacked_colors, point_labels, stacks_lengths, point_inds, cloud_inds):

    # Get batch indice for each point
    batch_inds = self.tf_get_batch_inds(stacks_lengths)

    # Augment input points
    stacked_points, scales, rots = self.tf_augment_input(stacked_points,
                                                         batch_inds,
                                                         config)

    # First add a column of 1 as feature for the network to be able to learn 3D shapes
    stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)

    # Get coordinates and colors
    stacked_original_coordinates = stacked_colors[:, 3:]
    stacked_colors = stacked_colors[:, :3]

    # Augmentation : randomly drop colors
    if config.in_features_dim in [4, 5]:
        num_batches = batch_inds[-1] + 1
        s = tf.cast(tf.less(tf.random_uniform((num_batches,)), config.augment_color), tf.float32)
        stacked_s = tf.gather(s, batch_inds)
        stacked_colors = stacked_colors * tf.expand_dims(stacked_s, axis=1)

    # Then use positions or not
    if config.in_features_dim == 1:
        pass
    elif config.in_features_dim == 2:
        stacked_features = tf.concat((stacked_features, stacked_original_coordinates[:, 2:]), axis=1)
    elif config.in_features_dim == 3:
        stacked_features = stacked_colors
    elif config.in_features_dim == 4:
        stacked_features = tf.concat((stacked_features, stacked_colors), axis=1)
    elif config.in_features_dim == 5:
        stacked_features = tf.concat((stacked_features, stacked_colors, stacked_original_coordinates[:, 2:]), axis=1)
    elif config.in_features_dim == 7:
        stacked_features = tf.concat((stacked_features, stacked_colors, stacked_points), axis=1)
    else:
        raise ValueError('Only accepted input dimensions are 1, 3, 4 and 7 (without and with rgb/xyz)')

    # Get the whole input list
    input_list = self.tf_segmentation_inputs(config,
                                             stacked_points,
                                             stacked_features,
                                             point_labels,
                                             stacks_lengths,
                                             batch_inds)
which I would change to something like:

def tf_map(stacked_points, stacked_colors, stacked_addvalues, point_labels, stacks_lengths, point_inds, cloud_inds):

    # Get batch indice for each point
    batch_inds = self.tf_get_batch_inds(stacks_lengths)

    # Augment input points
    stacked_points, scales, rots = self.tf_augment_input(stacked_points,
                                                         batch_inds,
                                                         config)

    # First add a column of 1 as feature for the network to be able to learn 3D shapes
    stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)

    # Get coordinates and colors
    stacked_original_coordinates = stacked_colors[:, 3:]
    stacked_colors = stacked_colors[:, :3]

    **# Get addvalues (already a separate 3-column tensor)
    stacked_addvalues = stacked_addvalues[:, :3]**

    # Augmentation : randomly drop colors
    if config.in_features_dim in [4, 5]:
        num_batches = batch_inds[-1] + 1
        s = tf.cast(tf.less(tf.random_uniform((num_batches,)), config.augment_color), tf.float32)
        stacked_s = tf.gather(s, batch_inds)
        stacked_colors = stacked_colors * tf.expand_dims(stacked_s, axis=1)

    # Then use positions or not
    if config.in_features_dim == 1:
        pass
    elif config.in_features_dim == 2:
        stacked_features = tf.concat((stacked_features, stacked_original_coordinates[:, 2:]), axis=1)
    elif config.in_features_dim == 3:
        stacked_features = stacked_colors
    elif config.in_features_dim == 4:
        stacked_features = tf.concat((stacked_features, stacked_colors), axis=1)
    elif config.in_features_dim == 5:
        stacked_features = tf.concat((stacked_features, stacked_colors, stacked_original_coordinates[:, 2:]), axis=1)
    elif config.in_features_dim == 7:
        stacked_features = tf.concat((stacked_features, stacked_colors, stacked_points), axis=1)
    **elif config.in_features_dim == 10:
        stacked_features = tf.concat((stacked_features, stacked_colors, stacked_addvalues, stacked_points), axis=1)**
    else:
        raise ValueError('Only accepted input dimensions are 1, 3, 4, 7 and 10 (without and with rgb/xyz/addvalues)')
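And I assume the training configuration would then have to ask for the bigger input feature size, e.g. in the config class used for training (the parameter name exists in the current configs; the value is my assumption matching the new branch above):

# e.g. in training_Semantic3D.py, inside the config class
in_features_dim = 10  # 1 (constant one) + 3 (RGB) + 3 (addvalues) + 3 (XYZ)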
Is there anything else that should be changed, or should the changes be done in a different way?