diff --git a/.github/workflows/docbuild.yml b/.github/workflows/docbuild.yml
index 2e9b91d7..8e3f6ebb 100644
--- a/.github/workflows/docbuild.yml
+++ b/.github/workflows/docbuild.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
@@ -33,13 +33,16 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install .[all]
- sudo apt-get install -y xvfb
+ sudo apt-get install -y \
+ libgl1 \
+ libglx-mesa0 \
+ libegl1 \
+ libgl1-mesa-dri \
+ libvulkan1 \
+ mesa-vulkan-drivers \
+ vulkan-tools
- name: Build docs
- env:
- XVFB: 1
- DISPLAY: :99
run: |
- Xvfb :99 -screen 0 1280x1024x24 &
cd docs
make html
- name: Upload docs
diff --git a/.github/workflows/nightly_anisotropic_test.yml b/.github/workflows/nightly_anisotropic_test.yml
index 6cc044d6..ed9c0def 100644
--- a/.github/workflows/nightly_anisotropic_test.yml
+++ b/.github/workflows/nightly_anisotropic_test.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
diff --git a/.github/workflows/nightly_basic_test.yml b/.github/workflows/nightly_basic_test.yml
index 7ec20096..9a2b24ba 100644
--- a/.github/workflows/nightly_basic_test.yml
+++ b/.github/workflows/nightly_basic_test.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
diff --git a/.github/workflows/nightly_custom_test.yml b/.github/workflows/nightly_custom_test.yml
index fe9c2272..db3abb25 100644
--- a/.github/workflows/nightly_custom_test.yml
+++ b/.github/workflows/nightly_custom_test.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
@@ -28,10 +28,17 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install .[dev,fury,afqbrowser,nn]
+ sudo apt-get install -y \
+ libgl1 \
+ libglx-mesa0 \
+ libegl1 \
+ libgl1-mesa-dri \
+ libvulkan1 \
+ mesa-vulkan-drivers \
+ vulkan-tools
- name: Lint
run: |
flake8 --ignore N802,N806,W503 --select W504 `find . -name \*.py | grep -v setup.py | grep -v version.py | grep -v __init__.py | grep -v /docs/`
- name: Test
run: |
- export TEST_WITH_XVFB=true
cd && mkdir for_test && cd for_test && pytest --pyargs AFQ --cov-report term-missing --cov=AFQ -m "nightly_custom" --durations=0
diff --git a/.github/workflows/nightly_reco80_test.yml b/.github/workflows/nightly_reco80_test.yml
index a2d2486a..7eaf8f47 100644
--- a/.github/workflows/nightly_reco80_test.yml
+++ b/.github/workflows/nightly_reco80_test.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
diff --git a/.github/workflows/nightly_reco_test.yml b/.github/workflows/nightly_reco_test.yml
index b94eb7d7..32d1eefe 100644
--- a/.github/workflows/nightly_reco_test.yml
+++ b/.github/workflows/nightly_reco_test.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
diff --git a/.github/workflows/nightly_test.yml b/.github/workflows/nightly_test.yml
index eb7d529e..f9b9d9e9 100644
--- a/.github/workflows/nightly_test.yml
+++ b/.github/workflows/nightly_test.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.12"]
+ python-version: ["3.13"]
steps:
- name: Checkout repo
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 9a8712b8..2ea10bf7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -13,7 +13,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.11", "3.12", "3.13"]
nibabel-pre: [true, false]
steps:
diff --git a/AFQ/_fixes.py b/AFQ/_fixes.py
index 3d92664c..9a62f20a 100644
--- a/AFQ/_fixes.py
+++ b/AFQ/_fixes.py
@@ -1,10 +1,13 @@
import logging
import math
+import tempfile
+from math import radians
import numpy as np
from dipy.data import default_sphere
from dipy.reconst.gqi import squared_radial_component
from dipy.tracking.streamline import set_number_of_points
+from PIL import Image
from scipy.linalg import blas, pinvh
from scipy.special import gammaln, lpmv
from tqdm import tqdm
@@ -238,3 +241,44 @@ def gaussian_weights(bundle, n_points=100, return_mahalnobis=False, stat=np.mean
weights = 1 / weights
# Normalize before returning, so that the weights in each node sum to 1:
return weights / np.sum(weights, 0)
+
+
+def make_gif(show_m, out_path, n_frames=36, az_ang=-10):
+ """
+    Make an animated GIF from a Fury Show Manager.
+
+ Parameters
+ ----------
+ show_m : Fury Show Manager
+ The Fury Show Manager to use for rendering.
+
+ out_path : str
+ The name of the output file.
+
+ n_frames : int
+ The number of frames to render.
+ Default: 36
+
+ az_ang : float
+ The angle to rotate the camera around the
+ z-axis for each frame, in degrees.
+ Default: -10
+ """
+ video = []
+
+ show_m.render()
+ show_m.window.draw()
+
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ for ii in tqdm(range(n_frames), desc="Generating GIF"):
+ frame_fname = f"{tmp_dir}/{ii}.png"
+ show_m.screens[0].controller.rotate((radians(az_ang), 0), None)
+ show_m.render()
+ show_m.window.draw()
+ show_m.snapshot(frame_fname)
+ video.append(frame_fname)
+
+ video = [Image.open(frame) for frame in video]
+ video[0].save(
+ out_path, save_all=True, append_images=video[1:], duration=300, loop=1
+ )
diff --git a/AFQ/api/group.py b/AFQ/api/group.py
index a94c6949..2c225a9d 100644
--- a/AFQ/api/group.py
+++ b/AFQ/api/group.py
@@ -836,7 +836,6 @@ def group_montage(self, bundle_name, size, view, direc, slice_pos=None):
t1_dict = self.export("t1_masked", collapse=False)
viz_backend_dict = self.export("viz_backend", collapse=False)
- b0_backend_dict = self.export("b0", collapse=False)
dwi_affine_dict = self.export("dwi_affine", collapse=False)
bundles_dict = self.export("bundles", collapse=False)
best_scalar_dict = self.export(best_scalar, collapse=False)
@@ -847,7 +846,6 @@ def group_montage(self, bundle_name, size, view, direc, slice_pos=None):
this_sub = self.valid_sub_list[ii]
this_ses = self.valid_ses_list[ii]
viz_backend = viz_backend_dict[this_sub][this_ses]
- b0 = b0_backend_dict[this_sub][this_ses]
t1 = nib.load(t1_dict[this_sub][this_ses])
dwi_affine = dwi_affine_dict[this_sub][this_ses]
bundles = bundles_dict[this_sub][this_ses]
@@ -913,17 +911,15 @@ def group_montage(self, bundle_name, size, view, direc, slice_pos=None):
pio.kaleido.scope._shutdown_kaleido()
else:
- from dipy.viz import window
-
- direc = np.fromiter(eye.values(), dtype=int)
- data_shape = np.asarray(nib.load(b0).get_fdata().shape)
- figure.set_camera(
- position=direc * data_shape,
- focal_point=data_shape // 2,
- view_up=(0, 0, 1),
+ from fury import window
+
+ from AFQ.viz.fury_backend import scene_rotate_forward
+
+ show_m = window.ShowManager(
+ scene=figure, window_type="offscreen", size=(600, 600)
)
- figure.zoom(0.5)
- window.snapshot(figure, fname=this_fname, size=(600, 600))
+ scene_rotate_forward(show_m, figure)
+ show_m.snapshot(this_fname)
def _save_file(curr_img, curr_file_num):
save_path = op.abspath(
diff --git a/AFQ/api/participant.py b/AFQ/api/participant.py
index fd0e08e6..827b38b5 100644
--- a/AFQ/api/participant.py
+++ b/AFQ/api/participant.py
@@ -5,7 +5,6 @@
from time import time
import nibabel as nib
-import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
@@ -317,17 +316,15 @@ def participant_montage(self, images_per_row=2):
pio.kaleido.scope._shutdown_kaleido()
else:
- from dipy.viz import window
-
- direc = np.fromiter(eye.values(), dtype=int)
- data_shape = np.asarray(nib.load(self.export("b0")).get_fdata().shape)
- figure.set_camera(
- position=direc * data_shape,
- focal_point=data_shape // 2,
- view_up=(0, 0, 1),
+ from fury import window
+
+ from AFQ.viz.fury_backend import scene_rotate_forward
+
+ show_m = window.ShowManager(
+ scene=figure, window_type="offscreen", size=(600, 600)
)
- figure.zoom(0.5)
- window.snapshot(figure, fname=this_fname, size=(600, 600))
+ scene_rotate_forward(show_m, figure)
+ show_m.snapshot(this_fname)
def _save_file(curr_img):
save_path = op.abspath(
diff --git a/AFQ/tasks/viz.py b/AFQ/tasks/viz.py
index 065f8799..145566c6 100644
--- a/AFQ/tasks/viz.py
+++ b/AFQ/tasks/viz.py
@@ -385,16 +385,12 @@ def plot_tract_profiles(base_fname, output_dir, scalars, segmentation_imap):
@immlib.calc("viz_backend")
-def init_viz_backend(viz_backend_spec="plotly_no_gif", virtual_frame_buffer=False):
+def init_viz_backend(viz_backend_spec="plotly_no_gif"):
"""
An instance of the `AFQ.viz.utils.viz_backend` class.
Parameters
----------
- virtual_frame_buffer : bool, optional
- Whether to use a virtual frame buffer. This is if
- generating GIFs in a headless environment.
- Default: False
viz_backend_spec : str, optional
Which visualization backend to use.
See Visualization Backends page in documentation for details
@@ -402,17 +398,9 @@ def init_viz_backend(viz_backend_spec="plotly_no_gif", virtual_frame_buffer=Fals
One of {"fury", "plotly", "plotly_no_gif"}.
Default: "plotly_no_gif"
"""
- if not isinstance(virtual_frame_buffer, bool):
- raise TypeError("virtual_frame_buffer must be a bool")
if "fury" not in viz_backend_spec and "plotly" not in viz_backend_spec:
raise TypeError("viz_backend_spec must contain either 'fury' or 'plotly'")
- if virtual_frame_buffer:
- from xvfbwrapper import Xvfb
-
- vdisplay = Xvfb(width=1280, height=1280)
- vdisplay.start()
-
return Viz(backend=viz_backend_spec.lower())
diff --git a/AFQ/tests/test_api.py b/AFQ/tests/test_api.py
index 9771ea68..c5bfdc8c 100644
--- a/AFQ/tests/test_api.py
+++ b/AFQ/tests/test_api.py
@@ -17,7 +17,6 @@
import toml
from dipy.io.streamline import load_tractogram
from dipy.segment.metric import mdf
-from dipy.testing.decorators import xvfb_it
from pandas.testing import assert_series_equal
from pcollections._lazy import LazyError
@@ -272,7 +271,6 @@ def test_AFQ_no_derivs():
@pytest.mark.nightly_custom
-@xvfb_it
def test_AFQ_fury():
tmpdir = tempfile.TemporaryDirectory()
bids_path = op.join(tmpdir.name, "stanford_hardi")
diff --git a/AFQ/viz/fury_backend.py b/AFQ/viz/fury_backend.py
index d8771f20..5040fbc9 100644
--- a/AFQ/viz/fury_backend.py
+++ b/AFQ/viz/fury_backend.py
@@ -1,15 +1,14 @@
import logging
-import os.path as op
-import tempfile
+from math import radians
import numpy as np
from dipy.tracking.streamline import set_number_of_points
import AFQ.viz.utils as vut
+from AFQ._fixes import make_gif
try:
- import IPython.display as display
- from dipy.viz import actor, ui, window
+ from fury import actor, window
from fury.colormap import line_colors
except (ImportError, ModuleNotFoundError) as e:
raise ImportError(vut.viz_import_msg_error("fury")) from e
@@ -23,14 +22,19 @@ def _inline_interact(scene, inline, interact):
"""
if interact:
viz_logger.info("Showing interactive scene...")
- window.show(scene)
+ show_m = window.ShowManager(
+ scene=scene, size=(1200, 1200), window_type="default"
+ )
+ show_m.start()
if inline:
viz_logger.info("Showing inline scene...")
- tdir = tempfile.gettempdir()
- fname = op.join(tdir, "fig.png")
- window.snapshot(scene, fname=fname, size=(1200, 1200))
- display.display_png(display.Image(fname))
+ show_m = window.ShowManager(
+ scene=scene,
+ size=(1200, 1200),
+ window_type="jupyter",
+ )
+ show_m.start()
return scene
@@ -112,7 +116,7 @@ def visualize_bundles(
if figure is None:
figure = window.Scene()
- figure.SetBackground(background[0], background[1], background[2])
+ figure.background = (background[0], background[1], background[2])
for sls, color, name, dimensions in vut.tract_generator(
seg_sft, bundle, colors, n_points, img
@@ -129,76 +133,63 @@ def visualize_bundles(
sl[:, 2] = dimensions[2] - sl[:, 2]
if color_by_direction:
- sl_actor = actor.line(sls, opacity=opacity)
+ sl_actor = actor.streamlines(sls, opacity=opacity, thickness=line_width)
else:
- sl_actor = actor.line(sls, color, opacity=opacity)
+ sl_actor = actor.streamlines(
+ sls, colors=color, opacity=opacity, thickness=line_width
+ )
figure.add(sl_actor)
- sl_actor.GetProperty().SetRenderLinesAsTubes(1)
- sl_actor.GetProperty().SetLineWidth(line_width)
return _inline_interact(figure, inline, interact)
-def scene_rotate_forward(scene):
- scene.elevation(90)
- scene.set_camera(view_up=(0.0, 0.0, 1.0))
- scene.reset_camera()
- return scene
+def scene_rotate_forward(show_m, scene):
+ window.update_camera(show_m.screens[0].camera, None, scene)
+ show_m.screens[0].controller.rotate((0, radians(-90)), None)
+ show_m.render()
+ show_m.window.draw()
def create_gif(
figure,
file_name,
- n_frames=60,
- zoom=1,
- z_offset=0.5,
+ n_frames=36,
+ az_ang=-10,
size=(600, 600),
- rotate_forward=True,
):
"""
Convert a Fury Scene object into a gif
+    Frames are rendered using an offscreen Fury Show Manager.
+
Parameters
----------
- figure: Fury Scene object
- Scene to be converted to a gif
+ figure : Fury Scene object
+ The Fury Scene object to render.
- file_name: str
- File to save gif to.
+ file_name : str
+ The name of the output file.
- n_frames: int, optional
- Number of frames in gif.
- Will be evenly distributed throughout the rotation.
- Default: 60
+ n_frames : int
+ The number of frames to render.
+ Default: 36
- zoom: int, optional
- How much to magnify the figure in the fig.
- Default: 1
+ az_ang : float
+ The angle to rotate the camera around the
+ z-axis for each frame, in degrees.
+ Default: -10
- size: tuple, optional
- Size of the gif.
+ size : tuple
+ The size of the output gif, in pixels.
Default: (600, 600)
-
- rotate_forward: bool, optional
- Whether to rotate the figure forward before converting to a gif.
- Generally necessary for fury scenes.
- Default: True
"""
- if rotate_forward:
- figure = scene_rotate_forward(figure)
-
- tdir = tempfile.gettempdir()
- window.record(
- figure,
- az_ang=360.0 / n_frames,
- n_frames=n_frames,
- path_numbering=True,
- out_path=tdir + "/tgif",
- magnification=zoom,
+ show_m = window.ShowManager(
+ scene=figure,
+ window_type="offscreen",
size=size,
)
-
- vut.gif_from_pngs(tdir, file_name, n_frames, png_fname="tgif", add_zeros=True)
+ scene_rotate_forward(show_m, figure)
+ make_gif(show_m, file_name, n_frames=n_frames, az_ang=az_ang)
def visualize_roi(
@@ -338,133 +329,14 @@ def visualize_volume(
figure = window.Scene()
shape = volume.shape
- image_actor_z = actor.slicer(volume)
- slicer_opacity = opacity
- image_actor_z.opacity(slicer_opacity)
-
- image_actor_x = image_actor_z.copy()
if x is None:
- x = int(np.round(shape[0] / 2))
- image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1)
-
- image_actor_y = image_actor_z.copy()
-
+ x = shape[0] // 2
if y is None:
- y = int(np.round(shape[1] / 2))
- image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1)
-
- figure.add(image_actor_z)
- figure.add(image_actor_x)
- figure.add(image_actor_y)
-
- show_m = window.ShowManager(figure, size=(1200, 900))
- show_m.initialize()
-
- if interact:
- line_slider_z = ui.LineSlider2D(
- min_value=0,
- max_value=shape[2] - 1,
- initial_value=shape[2] / 2,
- text_template="{value:.0f}",
- length=140,
- )
-
- line_slider_x = ui.LineSlider2D(
- min_value=0,
- max_value=shape[0] - 1,
- initial_value=shape[0] / 2,
- text_template="{value:.0f}",
- length=140,
- )
-
- line_slider_y = ui.LineSlider2D(
- min_value=0,
- max_value=shape[1] - 1,
- initial_value=shape[1] / 2,
- text_template="{value:.0f}",
- length=140,
- )
-
- opacity_slider = ui.LineSlider2D(
- min_value=0.0, max_value=1.0, initial_value=slicer_opacity, length=140
- )
-
- def change_slice_z(slider):
- z = int(np.round(slider.value))
- image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z)
-
- def change_slice_x(slider):
- x = int(np.round(slider.value))
- image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1)
-
- def change_slice_y(slider):
- y = int(np.round(slider.value))
- image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1)
-
- def change_opacity(slider):
- slicer_opacity = slider.value
- image_actor_z.opacity(slicer_opacity)
- image_actor_x.opacity(slicer_opacity)
- image_actor_y.opacity(slicer_opacity)
-
- line_slider_z.on_change = change_slice_z
- line_slider_x.on_change = change_slice_x
- line_slider_y.on_change = change_slice_y
- opacity_slider.on_change = change_opacity
-
- def build_label(text):
- label = ui.TextBlock2D()
- label.message = text
- label.font_size = 18
- label.font_family = "Arial"
- label.justification = "left"
- label.bold = False
- label.italic = False
- label.shadow = False
- label.background = (0, 0, 0)
- label.color = (1, 1, 1)
-
- return label
-
- line_slider_label_z = build_label(text="Z Slice")
- line_slider_label_x = build_label(text="X Slice")
- line_slider_label_y = build_label(text="Y Slice")
- opacity_slider_label = build_label(text="Opacity")
-
- panel = ui.Panel2D(size=(300, 200), color=(1, 1, 1), opacity=0.1, align="right")
- panel.center = (1030, 120)
-
- panel.add_element(line_slider_label_x, (0.1, 0.75))
- panel.add_element(line_slider_x, (0.38, 0.75))
- panel.add_element(line_slider_label_y, (0.1, 0.55))
- panel.add_element(line_slider_y, (0.38, 0.55))
- panel.add_element(line_slider_label_z, (0.1, 0.35))
- panel.add_element(line_slider_z, (0.38, 0.35))
- panel.add_element(opacity_slider_label, (0.1, 0.15))
- panel.add_element(opacity_slider, (0.38, 0.15))
-
- show_m.scene.add(panel)
-
- global size
- size = figure.GetSize()
-
- def win_callback(obj, event):
- global size
- if size != obj.GetSize():
- size_old = size
- size = obj.GetSize()
- size_change = [size[0] - size_old[0], 0]
- panel.re_align(size_change)
-
- show_m.initialize()
-
- figure.zoom(1.5)
- figure.reset_clipping_range()
-
- if interact:
- show_m.add_window_callback(win_callback)
- show_m.render()
- show_m.start()
+ y = shape[1] // 2
+ if z is None:
+ z = shape[2] // 2
+ slicer_actor = actor.data_slicer(volume, opacity=opacity, initial_slices=(x, y, z))
+ figure.add(slicer_actor)
return _inline_interact(figure, inline, interact)
@@ -523,10 +395,8 @@ def _draw_core(
if flip_axes[2]:
fgarray[:, 2] = dimensions[2] - fgarray[:, 2]
- sl_actor = actor.line([fgarray], line_color)
+ sl_actor = actor.streamlines([fgarray], colors=line_color, thickness=20)
figure.add(sl_actor)
- sl_actor.GetProperty().SetRenderLinesAsTubes(1)
- sl_actor.GetProperty().SetLineWidth(20)
return line_color_untouched
@@ -592,7 +462,7 @@ def single_bundle_viz(
flip_axes = [False, False, False]
if figure is None:
figure = window.Scene()
- figure.SetBackground(1, 1, 1)
+ figure.background = (1, 1, 1)
n_points = len(indiv_profile)
sls, _, bundle_name, dimensions = next(
diff --git a/AFQ/viz/plot.py b/AFQ/viz/plot.py
index 90749294..2577c93e 100644
--- a/AFQ/viz/plot.py
+++ b/AFQ/viz/plot.py
@@ -439,7 +439,7 @@ def reco_flip(df):
self.prof_len = 100 - (percent_nan_tol // 2) * 2
if bundles is None:
self.bundles = self.profile_dict[names[0]]["tractID"].unique()
- self.bundles.sort()
+ self.bundles = sorted(self.bundles.tolist())
else:
self.bundles = bundles
self.color_dict = vut.gen_color_dict([*self.bundles, "median"])
diff --git a/docs/source/reference/viz_backend.rst b/docs/source/reference/viz_backend.rst
index fff6ced9..eda95b68 100644
--- a/docs/source/reference/viz_backend.rst
+++ b/docs/source/reference/viz_backend.rst
@@ -21,11 +21,7 @@ backend. Currently, there are three choices:
our current setup in Plotly, Fury can generate GIFs quickly. To use this
backend, install pyAFQ with the optional fury requirements:
pip install pyAFQ[fury]
- And install `libGL `_. If running
- fury on a headless environment, additionally install and use
- `Xvfb `_.
- Xvfb is used automatically in the api.GroupAFQ object if the virtual_frame_buffer
- argument is set to True.
+ And install `libGL `_.
By default, plotly_no_gif is used. Fury requires additional
installations and does not make interactive figures, and Plotly takes a
@@ -35,12 +31,18 @@ significant amount of time to generate rotating GIFs.
Fury Dockerfile for Cloudknot
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If using the fury visualization backend while running pyAFQ on cloudknot, you
-must provide a base image with the fury requiements installed.
+must provide a base image with the fury requirements installed.
Below is an example dockerfile that can be used to build that base image::
# Use official python base image
- FROM python:3.9
+ FROM python:3.13
# Install libgl
RUN apt-get update
- RUN apt-get install -y libgl1-mesa-glx
- RUN apt-get install -y xvfb
+ RUN apt-get install -y \
+ libgl1 \
+ libglx-mesa0 \
+ libegl1 \
+ libgl1-mesa-dri \
+ libvulkan1 \
+ mesa-vulkan-drivers \
+ vulkan-tools
diff --git a/examples/howto_examples/plot_stages_of_tractometry.py b/examples/howto_examples/plot_stages_of_tractometry.py
index a98e8e4b..3058befb 100644
--- a/examples/howto_examples/plot_stages_of_tractometry.py
+++ b/examples/howto_examples/plot_stages_of_tractometry.py
@@ -21,72 +21,27 @@
#
-import os
import os.path as op
import nibabel as nib
import numpy as np
-import tempfile
+from math import radians
from dipy.io.streamline import load_trk
from dipy.tracking.streamline import (transform_streamlines,
- set_number_of_points)
+ set_number_of_points,
+ values_from_volume)
from dipy.core.gradients import gradient_table
from dipy.align import resample
+from dipy.stats.analysis import afq_profile
from fury import actor, window
-from fury.actor import colormap_lookup_table
from fury.colormap import create_colormap
from matplotlib.cm import tab20
import AFQ.data.fetch as afd
from AFQ.viz.utils import gen_color_dict
+from AFQ._fixes import make_gif
-from PIL import Image
-
-##############################################################################
-# Define a function that makes videos
-# -----------------------------------
-# The PIL library has a function that can be used to create animated GIFs from
-# a series of images. We will use this function to create videos.
-#
-# .. note::
-# This function is not part of the AFQ library, but is included here for
-# convenience. It is not necessary to understand this function in order to
-# understand the rest of the example. If you are interested in learning more
-# about this function, you can read the PIL documentation. The function is
-# based on the `PIL.Image.save `_
-# function.
-
-
-def make_video(frames, out):
- """
- Make a video from a series of frames.
-
- Parameters
- ----------
- frames : list of str
- A list of file names of the frames to be included in the video.
-
- out : str
- The name of the output file. Format is determined by the file
- extension.
- """
- video = []
- for nn in frames:
- frame = Image.open(nn)
- video.append(frame)
-
- # Save the frames as an animated GIF
- video[0].save(
- out,
- save_all=True,
- append_images=video[1:],
- duration=300,
- loop=1)
-
-
-tmp = tempfile.mkdtemp()
-n_frames = 72
###############################################################################
# Get data from HBN POD2
@@ -105,7 +60,7 @@ def make_video(frames, out):
# Here, we will start by visualizing the diffusion data. We read in the
# diffusion data, as well as the gradient table, using the `nibabel` library.
# We then extract the b0, b1000, and b2000 volumes from the diffusion data.
-# We will use the `actor.slicer` function from `fury` to visualize these. This
+# We will use the `actor.data_slicer` function from `fury` to visualize these. This
# function takes a 3D volume as input and returns a `slicer` actor, which can
# then be added to a `window.Scene` object. We create a helper function that
# will create a slicer actor for a given volume and a given slice along the x,
@@ -143,65 +98,41 @@ def make_video(frames, out):
def slice_volume(data, x=None, y=None, z=None):
- slicer_actors = []
- slicer_actor_z = actor.slicer(data)
- if z is not None:
- slicer_actor_z.display_extent(
- 0, data.shape[0] - 1,
- 0, data.shape[1] - 1,
- z, z)
- slicer_actors.append(slicer_actor_z)
- if y is not None:
- slicer_actor_y = slicer_actor_z.copy()
- slicer_actor_y.display_extent(
- 0, data.shape[0] - 1,
- y, y,
- 0, data.shape[2] - 1)
- slicer_actors.append(slicer_actor_y)
- if x is not None:
- slicer_actor_x = slicer_actor_z.copy()
- slicer_actor_x.display_extent(
- x, x,
- 0, data.shape[1] - 1,
- 0, data.shape[2] - 1)
- slicer_actors.append(slicer_actor_x)
-
- return slicer_actors
-
-
-slicers_b0 = slice_volume(
+ if x is None:
+ x = data.shape[0] // 2
+ if y is None:
+ y = data.shape[1] // 2
+ if z is None:
+ z = data.shape[2] // 2
+ slicer_actor = actor.data_slicer(
+ data,
+ initial_slices=(x, y, z))
+ return slicer_actor
+
+
+slicer_b0 = slice_volume(
dmri_b0,
- x=dmri_b0.shape[0] // 2,
- y=dmri_b0.shape[1] // 2,
z=dmri_b0.shape[-1] // 3)
-slicers_b1000 = slice_volume(
+slicer_b1000 = slice_volume(
dmri_b1000,
- x=dmri_b0.shape[0] // 2,
- y=dmri_b0.shape[1] // 2,
- z=dmri_b0.shape[-1] // 3)
-slicers_b2000 = slice_volume(
+ z=dmri_b1000.shape[-1] // 3)
+slicer_b2000 = slice_volume(
dmri_b2000,
- x=dmri_b0.shape[0] // 2,
- y=dmri_b0.shape[1] // 2,
- z=dmri_b0.shape[-1] // 3)
+ z=dmri_b2000.shape[-1] // 3)
-for bval, slicers in zip([0, 1000, 2000],
- [slicers_b0, slicers_b1000, slicers_b2000]):
+for bval, slicer in zip([0, 1000, 2000],
+ [slicer_b0, slicer_b1000, slicer_b2000]):
scene = window.Scene()
- for slicer in slicers:
- scene.add(slicer)
- scene.set_camera(position=(721.34, 393.48, 97.03),
- focal_point=(96.00, 114.00, 96.00),
- view_up=(-0.01, 0.02, 1.00))
-
- scene.background((1, 1, 1))
- window.record(scene, out_path=f'{tmp}/b{bval}',
- size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
+ scene.add(slicer)
+ scene.background = (1, 1, 1)
- make_video(
- [f'{tmp}/b{bval}{ii:06d}.png' for ii in range(n_frames)],
- f'b{bval}.gif')
+ show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+ )
+ window.update_camera(show_m.screens[0].camera, None, slicer)
+ show_m.screens[0].controller.rotate((0, radians(-90)), None)
+ make_gif(show_m, f'b{bval}.gif')
#############################################################################
# Visualizing whole-brain tractography
@@ -250,32 +181,27 @@ def slice_volume(data, x=None, y=None, z=None):
#
-def lines_as_tubes(sl, line_width, **kwargs):
- line_actor = actor.line(sl, **kwargs)
- line_actor.GetProperty().SetRenderLinesAsTubes(1)
- line_actor.GetProperty().SetLineWidth(line_width)
- return line_actor
+whole_brain_actor = actor.streamlines(whole_brain_t1w, thickness=2)
+slicer = slice_volume(t1w, y=t1w.shape[1] // 2 - 5, z=t1w.shape[-1] // 3)
+
+def rotate_to_anterior(show_m):
+ window.update_camera(show_m.screens[0].camera, None, slicer)
+ show_m.screens[0].controller.rotate((0, radians(-90)), None)
-whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2)
-slicers = slice_volume(t1w, y=t1w.shape[1] // 2 - 5, z=t1w.shape[-1] // 3)
scene = window.Scene()
scene.add(whole_brain_actor)
-for slicer in slicers:
- scene.add(slicer)
-
-scene.set_camera(position=(721.34, 393.48, 97.03),
- focal_point=(96.00, 114.00, 96.00),
- view_up=(-0.01, 0.02, 1.00))
-
-scene.background((1, 1, 1))
-window.record(scene, out_path=f'{tmp}/whole_brain', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
+scene.add(slicer)
-make_video([f"{tmp}/whole_brain{ii:06d}.png" for ii in range(n_frames)],
- "whole_brain.gif")
+scene.background = (1, 1, 1)
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "whole_brain.gif")
#############################################################################
# Whole brain with waypoints
@@ -289,13 +215,12 @@ def lines_as_tubes(sl, line_width, **kwargs):
# https://docs.dipy.org/1.11.0/examples_built/registration/syn_registration_3d.html
scene.clear()
-whole_brain_actor = lines_as_tubes(whole_brain_t1w, 2)
+whole_brain_actor = actor.streamlines(whole_brain_t1w, thickness=2)
scene.add(whole_brain_actor)
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
-scene.background((1, 1, 1))
+scene.background = (1, 1, 1)
waypoint1 = nib.load(
op.join(
@@ -325,31 +250,24 @@ def lines_as_tubes(sl, line_width, **kwargs):
scene.add(waypoint1_actor)
scene.add(waypoint2_actor)
-window.record(scene, out_path=f'{tmp}/whole_brain_with_waypoints', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
-
-make_video([f"{tmp}/whole_brain_with_waypoints{ii:06d}.png" for ii in range(n_frames)],
- "whole_brain_with_waypoints.gif")
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "whole_brain_with_waypoints.gif")
bundle_path = op.join(afq_path,
'bundles')
-#############################################################################
-# Visualize the arcuate bundle
-# ----------------------------
-# Now visualize only the arcuate bundle that is selected with these waypoints.
-#
-
-fa_img = nib.load(op.join(afq_path,
- 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_model-DKI_FA.nii.gz'))
-fa = fa_img.get_fdata()
-sft_arc = load_trk(op.join(bundle_path,
- 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-ARC_L_tractography.trk'), fa_img)
-
-sft_arc.to_rasmm()
-arc_t1w = transform_streamlines(sft_arc.streamlines,
- np.linalg.inv(t1w_img.affine))
-
+############################################
+# Define the bundles
+# The bundles are defined by the waypoints that we just visualized. Here
+# we organize some names of bundles we want to visualize.
+# In current pyAFQ, only the formal names are used. But for this example,
+# we will use derivatives from previous versions of pyAFQ, where names
+# were abbreviated. We have standardized colors for each bundle,
+# provided by `gen_color_dict`, which we will use for visualization.
bundles = [
"ARC_R",
@@ -372,22 +290,66 @@ def lines_as_tubes(sl, line_width, **kwargs):
"ARC_L",
]
-color_dict = gen_color_dict(bundles)
+formal_bundles = [
+ "Right Arcuate",
+ "Right Anterior Thalamic",
+ "Right Corticospinal",
+ "Right Inferior Fronto-Occipital",
+ "Right Inferior Longitudinal",
+ "Right Superior Longitudinal",
+ "Right Uncinate",
+ "Right Cingulum Cingulate",
+ "Callosum Orbital",
+ "Callosum Anterior Frontal",
+ "Callosum Superior Frontal",
+ "Callosum Motor",
+ "Callosum Superior Parietal",
+ "Callosum Posterior Parietal",
+ "Callosum Temporal",
+ "Callosum Occipital",
+ "Left Cingulum Cingulate",
+ "Left Uncinate",
+ "Left Superior Longitudinal",
+ "Left Inferior Longitudinal",
+ "Left Inferior Fronto-Occipital",
+ "Left Corticospinal",
+ "Left Anterior Thalamic",
+ "Left Arcuate",
+]
+
+color_dict = gen_color_dict(formal_bundles)
-arc_actor = lines_as_tubes(arc_t1w, 8, colors=color_dict['ARC_L'])
+#############################################################################
+# Visualize the arcuate bundle
+# ----------------------------
+# Now visualize only the arcuate bundle that is selected with these waypoints.
+#
+
+fa_img = nib.load(op.join(afq_path,
+ 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_model-DKI_FA.nii.gz'))
+fa = fa_img.get_fdata()
+sft_arc = load_trk(op.join(bundle_path,
+ 'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-ARC_L_tractography.trk'), fa_img)
+
+sft_arc.to_rasmm()
+arc_t1w = transform_streamlines(sft_arc.streamlines,
+ np.linalg.inv(t1w_img.affine))
+
+arc_actor = actor.streamlines(arc_t1w, thickness=8, colors=color_dict['Left Arcuate'])
scene.clear()
scene.add(arc_actor)
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
scene.add(waypoint1_actor)
scene.add(waypoint2_actor)
-window.record(scene, out_path=f'{tmp}/arc1', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
-
-make_video([f"{tmp}/arc1{ii:06d}.png" for ii in range(n_frames)], "arc1.gif")
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "arc1.gif")
#############################################################################
# Clean bundle
@@ -398,13 +360,14 @@ def lines_as_tubes(sl, line_width, **kwargs):
scene.clear()
scene.add(arc_actor)
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
-window.record(scene, out_path=f'{tmp}/arc2', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
-
-make_video([f"{tmp}/arc2{ii:06d}.png" for ii in range(n_frames)], "arc2.gif")
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "arc2.gif")
clean_bundles_path = op.join(afq_path,
'clean_bundles')
@@ -416,18 +379,18 @@ def lines_as_tubes(sl, line_width, **kwargs):
arc_t1w = transform_streamlines(sft_arc.streamlines,
np.linalg.inv(t1w_img.affine))
-
-arc_actor = lines_as_tubes(arc_t1w, 8, colors=tab20.colors[18])
+arc_actor = actor.streamlines(arc_t1w, thickness=8, colors=tab20.colors[18])
scene.clear()
scene.add(arc_actor)
-for slicer in slicers:
- scene.add(slicer)
-
-window.record(scene, out_path=f'{tmp}/arc3', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
+scene.add(slicer)
-make_video([f"{tmp}/arc3{ii:06d}.png" for ii in range(n_frames)], "arc3.gif")
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "arc3.gif")
#############################################################################
# Show the values of tissue properties along the bundle
@@ -440,24 +403,25 @@ def lines_as_tubes(sl, line_width, **kwargs):
# There is a DIPY example with more details here:
# https://docs.dipy.org/1.11.0/examples_built/streamline_analysis/afq_tract_profiles.html
-lut_args = dict(scale_range=(0, 1),
- hue_range=(1, 0),
- saturation_range=(0, 1),
- value_range=(0, 1))
-
-arc_actor = lines_as_tubes(arc_t1w, 8,
- colors=resample(fa_img, t1w_img).get_fdata(),
- lookup_colormap=colormap_lookup_table(**lut_args))
scene.clear()
-scene.add(arc_actor)
-for slicer in slicers:
- scene.add(slicer)
+fa_in_t1 = resample(fa_img, t1w_img).get_fdata()
+fa_profiles = values_from_volume(fa_in_t1, arc_t1w, np.eye(4))
+for ii in range(len(arc_t1w)):
+ colors = create_colormap(np.asarray(fa_profiles[ii]), name="blues", auto=False)
+ arc_actor = actor.streamlines(
+ arc_t1w[ii], thickness=8,
+ colors=colors)
+ scene.add(arc_actor)
-window.record(scene, out_path=f'{tmp}/arc4', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
+scene.add(slicer)
-make_video([f"{tmp}/arc4{ii:06d}.png" for ii in range(n_frames)], "arc4.gif")
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "arc4.gif")
#############################################################################
# Core of the bundle and tract profile
@@ -468,32 +432,33 @@ def lines_as_tubes(sl, line_width, **kwargs):
core_arc = np.median(np.asarray(set_number_of_points(arc_t1w, 20)), axis=0)
-from dipy.stats.analysis import afq_profile
sft_arc.to_vox()
arc_profile = afq_profile(fa, sft_arc.streamlines, affine=np.eye(4),
n_points=20)
-core_arc_actor = lines_as_tubes(
+core_arc_actor = actor.streamlines(
[core_arc],
- 40,
- colors=create_colormap(arc_profile, 'viridis')
+ thickness=40,
+ colors=create_colormap(arc_profile, name='viridis')
)
-arc_actor = lines_as_tubes(arc_t1w, 1,
- colors=resample(fa_img, t1w_img).get_fdata(),
- lookup_colormap=colormap_lookup_table(**lut_args))
+arc_actor = actor.streamlines(
+ arc_t1w,
+ thickness=1,
+ opacity=0.2) # better to visualize the core
scene.clear()
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
scene.add(arc_actor)
scene.add(core_arc_actor)
-window.record(scene, out_path=f'{tmp}/arc5', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
-
-make_video([f"{tmp}/arc5{ii:06d}.png" for ii in range(n_frames)], "arc5.gif")
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "arc5.gif")
#############################################################################
# Core of all bundles and their tract profiles
@@ -501,11 +466,9 @@ def lines_as_tubes(sl, line_width, **kwargs):
# Same as before, but for all bundles.
scene.clear()
+scene.add(slicer)
-for slicer in slicers:
- scene.add(slicer)
-
-for bundle in bundles:
+for ii, bundle in enumerate(bundles):
sft = load_trk(op.join(clean_bundles_path,
f'sub-NDARAA948VFH_ses-HBNsiteRU_acq-64dir_space-T1w_desc-preproc_dwi_space-RASMM_model-CSD_desc-prob-afq-{bundle}_tractography.trk'), fa_img)
@@ -513,21 +476,23 @@ def lines_as_tubes(sl, line_width, **kwargs):
bundle_t1w = transform_streamlines(sft.streamlines,
np.linalg.inv(t1w_img.affine))
- bundle_actor = lines_as_tubes(bundle_t1w, 8, colors=color_dict[bundle])
+ bundle_actor = actor.streamlines(
+ bundle_t1w,
+ thickness=8,
+ colors=color_dict[formal_bundles[ii]]
+ )
scene.add(bundle_actor)
-window.record(scene, out_path=f'{tmp}/all_bundles', size=(2400, 2400),
- n_frames=n_frames, path_numbering=True)
-
-make_video(
- [f"{tmp}/all_bundles{ii:06d}.png" for ii in range(n_frames)],
- "all_bundles.gif")
-
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "all_bundles.gif")
scene.clear()
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
tract_profiles = []
for bundle in bundles:
@@ -544,22 +509,21 @@ def lines_as_tubes(sl, line_width, **kwargs):
afq_profile(fa, sft.streamlines, affine=np.eye(4),
n_points=20))
- core_actor = lines_as_tubes(
+ core_actor = actor.streamlines(
[core_bundle],
- 40,
- colors=create_colormap(tract_profiles[-1], 'viridis')
+ thickness=40,
+ colors=create_colormap(tract_profiles[-1], name='viridis')
)
scene.add(core_actor)
-window.record(scene,
- out_path=f'{tmp}/all_tract_profiles',
- size=(2400, 2400),
- n_frames=n_frames,
- path_numbering=True)
+show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+)
+rotate_to_anterior(show_m)
+make_gif(show_m, "all_tract_profiles.gif")
-make_video([f"{tmp}/all_tract_profiles{ii:06d}.png" for ii in range(n_frames)],
- "all_tract_profiles.gif")
#############################################################################
# Tract profiles as a table
@@ -575,7 +539,7 @@ def lines_as_tubes(sl, line_width, **kwargs):
for ii, bundle in enumerate(bundles):
ax.plot(np.arange(ii * 20, (ii + 1) * 20),
tract_profiles[ii],
- color=color_dict[bundle],
+ color=color_dict[formal_bundles[ii]],
linewidth=3)
ax.set_xticks(np.arange(0, 20 * len(bundles), 20))
ax.set_xticklabels(bundles, rotation=45, ha='right')
diff --git a/examples/tutorial_examples/plot_005_viz.py b/examples/tutorial_examples/plot_005_viz.py
index bb7a9381..d01e0dde 100644
--- a/examples/tutorial_examples/plot_005_viz.py
+++ b/examples/tutorial_examples/plot_005_viz.py
@@ -21,6 +21,7 @@
import os.path as op
import nibabel as nib
import numpy as np
+from math import radians
from dipy.io.streamline import load_trk
from dipy.tracking.streamline import transform_streamlines
@@ -101,24 +102,6 @@
np.linalg.inv(t1w_img.affine))
-#############################################################################
-#
-# .. note::
-# A virtual frame buffer is needed if you are running this example on
-# a machine that is not connected to a display ("headless"). If this is
-# the case, you can either set an environment variable called `XVFB` to `1`
-# or you can deindent the following code (and comment out the `if` statement)
-# to initialize the virtual frame buffer.
-
-if os.environ.get("XVFB", False):
- print("Initializing XVFB")
- import xvfbwrapper
- from xvfbwrapper import Xvfb
-
- vdisplay = Xvfb()
- vdisplay.start()
-
-
#############################################################################
# Visualizing bundles with principal direction coloring
# -----------------------------------------------------
@@ -138,63 +121,28 @@
# to `actor.line`, but for now we use the default setting, which colors each
# streamline based on the RAS orientation, and we set the line width to 8.
-def lines_as_tubes(sl, line_width, **kwargs):
- line_actor = actor.line(sl, **kwargs)
- line_actor.GetProperty().SetRenderLinesAsTubes(1)
- line_actor.GetProperty().SetLineWidth(line_width)
- return line_actor
-
-arc_actor = lines_as_tubes(arc_t1w, 8)
-cst_actor = lines_as_tubes(cst_t1w, 8)
+arc_actor = actor.streamlines(arc_t1w, thickness=8)
+cst_actor = actor.streamlines(cst_t1w, thickness=8)
#############################################################################
# Slicer actors
# -------------
# The anatomical image is rendered using `slicer` actors. These are actors that
-# visualize one slice of a three dimensional volume. Again, we create a helper
-# function that will slice a volume along the x, y, and z dimensions. This
-# function returns a list of the slicers we want to include in our
-# visualization. This can be one, two, or three slicers, depending on how many
-# of {x,y,z} are set. If you are curious to understand what is going on in this
-# function, take a look at the documentation for the
-# :met:`actor.slicer.display_extent` method (hint: for every dimension you
-# select on, you want the full extent of the image on the two *other* two
-# dimensions). We call the function on the T1-weighted data, selecting the # x
-# slice that is half-way through the x dimension of the image (`shape[0]`) and
-# the z slice that is a third of a way through that x dimension of the image
-# (`shape[-1]`).
-
-
-def slice_volume(data, x=None, y=None, z=None):
- slicer_actors = []
- slicer_actor_z = actor.slicer(data)
- if z is not None:
- slicer_actor_z.display_extent(
- 0, data.shape[0] - 1,
- 0, data.shape[1] - 1,
- z, z)
- slicer_actors.append(slicer_actor_z)
- if y is not None:
- slicer_actor_y = slicer_actor_z.copy()
- slicer_actor_y.display_extent(
- 0, data.shape[0] - 1,
- y, y,
- 0, data.shape[2] - 1)
- slicer_actors.append(slicer_actor_y)
- if x is not None:
- slicer_actor_x = slicer_actor_z.copy()
- slicer_actor_x.display_extent(
- x, x,
- 0, data.shape[1] - 1,
- 0, data.shape[2] - 1)
- slicer_actors.append(slicer_actor_x)
-
- return slicer_actors
-
-
-slicers = slice_volume(t1w, x=t1w.shape[0] // 2, z=t1w.shape[-1] // 3)
+# visualize one slice of a three dimensional volume. We call the function on the
+# T1-weighted data, selecting the x slice that is half-way through the x
+# dimension of the image (`shape[0]`) and the z slice that is a third of
+# the way through the z dimension of the image (`shape[-1]`).
+# We set the visibility of the y slice to `False`
+
+slicer = actor.data_slicer(t1w,
+ visibility=(
+ True, False, True),
+ initial_slices=(
+ t1w.shape[0] // 2,
+ t1w.shape[1] // 2,
+ t1w.shape[-1] // 3))
#############################################################################
# Making a `scene`
@@ -207,15 +155,14 @@ def slice_volume(data, x=None, y=None, z=None):
scene.add(arc_actor)
scene.add(cst_actor)
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
#############################################################################
# Showing the visualization
# -------------------------
# If you are working in an interactive session, you can call::
#
-# window.show(scene, size=(1200, 1200), reset_camera=False)
+# window.show(scene, size=(1200, 1200))
#
# to see what the visualization looks like. This would pop up a window that will
# show you the visualization as it is now. You can interact with this
@@ -223,25 +170,7 @@ def slice_volume(data, x=None, y=None, z=None):
# mouse+shift to pan and rotate it in plane, respectively. Use the scroll up and
# scroll down in your mouse to zoom in and out. Once you have found a view of
# the data that you like, you can close the window (as long as its open, it is
-# blocking execution of any further commands in the Python interpreter!) and
-# then you can query your scene for the "camera settings" by calling::
-#
-# scene.camera_info()
-#
-# This will print out to the screen something like this::
-#
-# # Active Camera
-# Position (238.04, 174.48, 143.04)
-# Focal Point (96.32, 110.34, 84.48)
-# View Up (-0.33, -0.12, 0.94)
-#
-# We can use the information we have gleaned to set the camera on subsequent
-# visualization that use this scene object.
-
-
-scene.set_camera(position=(238.04, 174.48, 143.04),
- focal_point=(96.32, 110.34, 84.48),
- view_up=(-0.33, -0.12, 0.94))
+# blocking execution of any further commands in the Python interpreter!)
#############################################################################
# Record the visualization
@@ -253,10 +182,20 @@ def slice_volume(data, x=None, y=None, z=None):
out_folder = op.join(afd.afq_home, "VizExample")
os.makedirs(out_folder, exist_ok=True)
-window.record(
- scene=scene,
- out_path=op.join(out_folder, 'arc_cst1.png'),
- size=(2400, 2400))
+
+def save_png(scene, name):
+    """Helper function to save PNGs in this example."""
+ show_m = window.ShowManager(
+ scene=scene, window_type="offscreen",
+ size=(2400, 2400)
+ )
+ window.update_camera(show_m.screens[0].camera, None, scene)
+ show_m.screens[0].controller.rotate((0, radians(-90)), None)
+ show_m.render()
+ show_m.window.draw()
+ show_m.snapshot(op.join(out_folder, name))
+
+save_png(scene, 'arc_cst1.png')
############################################################################
@@ -277,21 +216,16 @@ def slice_volume(data, x=None, y=None, z=None):
color_arc = tab20.colors[18]
color_cst = tab20.colors[2]
-arc_actor = lines_as_tubes(arc_t1w, 8, colors=color_arc)
-cst_actor = lines_as_tubes(cst_t1w, 8, colors=color_cst)
+arc_actor = actor.streamlines(arc_t1w, thickness=8, colors=color_arc)
+cst_actor = actor.streamlines(cst_t1w, thickness=8, colors=color_cst)
scene.clear()
scene.add(arc_actor)
scene.add(cst_actor)
-for slicer in slicers:
- scene.add(slicer)
-
-window.record(
- scene=scene,
- out_path=op.join(out_folder, 'arc_cst2.png'),
- size=(2400, 2400))
+scene.add(slicer)
+save_png(scene, 'arc_cst2.png')
#############################################################################
# Adding core bundles with tract profiles
@@ -333,35 +267,30 @@ def slice_volume(data, x=None, y=None, z=None):
arc_profile = afq_profile(fa, sft_arc.streamlines, affine=np.eye(4))
cst_profile = afq_profile(fa, sft_cst.streamlines, affine=np.eye(4))
-core_arc_actor = lines_as_tubes(
+core_arc_actor = actor.streamlines(
[core_arc],
- 40,
+ thickness=40,
colors=create_colormap(arc_profile, name='viridis')
)
-core_cst_actor = lines_as_tubes(
+core_cst_actor = actor.streamlines(
[core_cst],
- 40,
+ thickness=40,
colors=create_colormap(cst_profile, name='viridis')
)
scene.clear()
-arc_actor = lines_as_tubes(arc_t1w, 8, colors=color_arc, opacity=0.1)
-cst_actor = lines_as_tubes(cst_t1w, 8, colors=color_cst, opacity=0.1)
+arc_actor = actor.streamlines(arc_t1w, thickness=8, colors=color_arc, opacity=0.1)
+cst_actor = actor.streamlines(cst_t1w, thickness=8, colors=color_cst, opacity=0.1)
scene.add(arc_actor)
scene.add(cst_actor)
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
scene.add(core_arc_actor)
scene.add(core_cst_actor)
-window.record(
- scene=scene,
- out_path=op.join(out_folder, 'arc_cst3.png'),
- size=(2400, 2400))
-
+save_png(scene, 'arc_cst3.png')
#############################################################################
# Adding ROIs
@@ -376,7 +305,7 @@ def slice_volume(data, x=None, y=None, z=None):
# interpolation from the low resolution of the diffusion into the high
# resolution of the T1-weighted. We will include in the volume rendering any
# values larger than 0. The main change from the previous visualizations is the
-# adition of a `contour_from_roi` actor for each of the ROIs. We select another
+# addition of a `contour_from_roi` actor for each of the ROIs. We select another
# color from the Tableau 20 palette to represent this, and use an opacity of
# 0.5.
#
@@ -406,13 +335,12 @@ def slice_volume(data, x=None, y=None, z=None):
scene.clear()
-arc_actor = lines_as_tubes(arc_t1w, 8, colors=color_arc)
-cst_actor = lines_as_tubes(cst_t1w, 8, colors=color_cst)
+arc_actor = actor.streamlines(arc_t1w, thickness=8, colors=color_arc)
+cst_actor = actor.streamlines(cst_t1w, thickness=8, colors=color_cst)
scene.add(arc_actor)
scene.add(cst_actor)
-for slicer in slicers:
- scene.add(slicer)
+scene.add(slicer)
surface_color = tab20.colors[0]
@@ -428,11 +356,7 @@ def slice_volume(data, x=None, y=None, z=None):
scene.add(waypoint1_actor)
scene.add(waypoint2_actor)
-
-window.record(
- scene=scene,
- out_path=op.join(out_folder, 'arc_cst4.png'),
- size=(2400, 2400))
+save_png(scene, 'arc_cst4.png')
#############################################################################
# Visualizing tracts and tract profiles with a "glass brain"
@@ -471,14 +395,11 @@ def slice_volume(data, x=None, y=None, z=None):
np.linspace(0, 1, len(arc_profile)),
arc_profile)
colors = create_colormap(interpolated_values, name='Spectral')
- line_actor = lines_as_tubes([sl], 8, colors=colors)
+ line_actor = actor.streamlines([sl], thickness=8, colors=colors)
scene.add(line_actor)
-scene.background((1, 1, 1))
-window.record(
- scene=scene,
- out_path=op.join(out_folder, 'arc_cst5.png'),
- size=(2400, 2400))
+scene.background = (1, 1, 1)
+save_png(scene, 'arc_cst5.png')
#############################################################################
# Making a Figure out of many fury panels
@@ -496,15 +417,6 @@ def slice_volume(data, x=None, y=None, z=None):
pf.add_img(op.join(out_folder, 'arc_cst5.png'), 1, 1)
pf.format_and_save_figure(f"arc_cst_fig.png")
-#############################################################################
-#
-# .. note::
-# If a virtual buffer was started before, it's a good idea to stop it.
-
-if os.environ.get("XVFB", False):
- print("Stopping XVFB")
- vdisplay.stop()
-
#############################################################################
# References
# ----------
diff --git a/setup.cfg b/setup.cfg
index 0b8feeda..6d66c246 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,13 +26,13 @@ setup_requires =
setuptools_scm>=8
setuptools>=64
-python_requires = >=3.10, <3.13
+python_requires = >=3.11, <3.14
install_requires =
# core packages
scikit_image>=0.14.2
dipy>=1.11.0,<1.12.0
scikit-learn
- pandas
+ pandas>=2.2.3
pybids>=0.16.2
templateflow>=0.8
immlib
@@ -69,7 +69,6 @@ dev =
numpydoc==1.2
sphinx-autoapi
rapidfuzz
- xvfbwrapper>=0.2.9
moto>=3.0.0,<5.0.0
pydata-sphinx-theme
sphinx-design
@@ -79,9 +78,7 @@ dev =
ruff>=0.14.10
pre-commit
fury =
- fury==0.12.0
- xvfbwrapper>=0.2.9
- ipython
+ fury>=2.0.0a1
fsl =
fslpy
afqbrowser =