Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions RetroFE/Source/Graphics/Component/ScrollingList.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,16 @@ void ScrollingList::deallocateSpritePoints() {
}
}

void ScrollingList::notifyVideoCenterMode() {
if (videoType_ == "null") return; // no video slots in this list
const size_t N = components_.size();
for (size_t i = 0; i < N; ++i) {
if (auto* vc = dynamic_cast<VideoComponent*>(components_[i])) {
vc->setCenterMode(i == selectedOffsetIndex_);
}
}
}

void ScrollingList::allocateSpritePoints() {
if (!items_ || items_->empty()) return;
if (!scrollPoints_ || scrollPoints_->empty()) return;
Expand All @@ -238,6 +248,9 @@ void ScrollingList::allocateSpritePoints() {
resetTweens(c, (*tweenPoints_)[i], view, view, 0);
}
}

// Notify each video slot of its center/side role so audio is managed correctly.
notifyVideoCenterMode();
}

void ScrollingList::reallocateSpritePoints() {
Expand Down Expand Up @@ -1341,6 +1354,10 @@ void ScrollingList::scroll(bool forward) {
}

components_.rotate(forward);

// Notify each video slot of its updated center/side role after the rotation.
// Center slot (selectedOffsetIndex_) enables audio; all others mute.
notifyVideoCenterMode();
}

bool ScrollingList::isPlaylist() const
Expand Down
1 change: 1 addition & 0 deletions RetroFE/Source/Graphics/Component/ScrollingList.h
Original file line number Diff line number Diff line change
Expand Up @@ -200,6 +200,7 @@ class ScrollingList : public Component

void clearPoints();
void clearTweenPoints();
void notifyVideoCenterMode(); // tells each VideoComponent slot whether it is the center slot

void resetTweens(Component* c, std::shared_ptr<AnimationEvents> sets, ViewInfo* currentViewInfo, ViewInfo* nextViewInfo, float scrollTime) const;
inline size_t loopIncrement(size_t offset, size_t index, size_t size) const;
Expand Down
7 changes: 7 additions & 0 deletions RetroFE/Source/Graphics/Component/VideoComponent.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -334,6 +334,13 @@ void VideoComponent::resume() {
videoInst_->resume();
}

void VideoComponent::setCenterMode(bool isCenterMode) {
if (!videoInst_) return;
if (auto* gst = dynamic_cast<GStreamerVideo*>(videoInst_.get())) {
gst->setCenterMode(isCenterMode);
}
}


void VideoComponent::restart() {
if (!videoInst_) {
Expand Down
1 change: 1 addition & 0 deletions RetroFE/Source/Graphics/Component/VideoComponent.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ class VideoComponent : public Component {
void pause() override;
void resume();
void restart() override;
void setCenterMode(bool isCenterMode);
unsigned long long getCurrent() override;
unsigned long long getDuration() override;
bool isPaused() override;
Expand Down
224 changes: 220 additions & 4 deletions RetroFE/Source/Video/GStreamerVideo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -633,6 +633,21 @@ bool GStreamerVideo::unload() {
// This is what fixed your memory leak.
gst_element_set_state(pipeline_, GST_STATE_READY);

// Drain both appsinks after READY to ensure no stale samples linger.
// This prevents old-URI frames from flashing on the next play().
if (videoSink_ && GST_IS_APP_SINK(videoSink_)) {
while (GstSample* s = gst_app_sink_try_pull_sample(GST_APP_SINK(videoSink_), 0))
gst_sample_unref(s);
while (GstSample* s = gst_app_sink_try_pull_preroll(GST_APP_SINK(videoSink_), 0))
gst_sample_unref(s);
}
if (audioSink_ && GST_IS_APP_SINK(audioSink_)) {
while (GstSample* s = gst_app_sink_try_pull_sample(GST_APP_SINK(audioSink_), 0))
gst_sample_unref(s);
while (GstSample* s = gst_app_sink_try_pull_preroll(GST_APP_SINK(audioSink_), 0))
gst_sample_unref(s);
}

// Clear our local atomic sample reference
if (GstSample* old = stagedSample_.exchange(nullptr, std::memory_order_acq_rel)) {
gst_sample_unref(old);
Expand Down Expand Up @@ -812,8 +827,11 @@ bool GStreamerVideo::createPipelineIfNeeded() {
gst_caps_unref(acaps);

// Set playbin flags and properties.
// Start with VIDEO|AUDIO; clear flags we never want.
gint flags = GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_AUDIO;
flags &= ~(1 << 4); // Clear GST_PLAY_FLAG_SOFT_VOLUME
flags &= ~GST_PLAY_FLAG_SOFT_VOLUME; // we manage volume via AudioBus
flags &= ~GST_PLAY_FLAG_BUFFERING; // no buffering logic for local files
flags &= ~GST_PLAY_FLAG_TEXT; // no subtitles/text tracks
g_object_set(pipeline_, "flags", flags, "instant-uri", TRUE, nullptr);

// Configure appsink.
Expand Down Expand Up @@ -857,12 +875,17 @@ bool GStreamerVideo::createPipelineIfNeeded() {
else {
videoCaps = gst_caps_from_string(
"video/x-raw,format=(string)I420,pixel-aspect-ratio=(fraction)1/1");
elementSetupHandlerId_ = g_signal_connect(pipeline_, "element-setup",
G_CALLBACK(elementSetupCallback), this);
sdlFormat_ = SDL_PIXELFORMAT_IYUV;
LOG_DEBUG("GStreamerVideo", "SDL pixel format: SDL_PIXELFORMAT_IYUV (HW accel: false)");
}
}

// Connect element-setup unconditionally so multiqueue is tuned in both HW and SW paths.
// Decoder-specific tweaks remain conditional inside the callback.
if (elementSetupHandlerId_ == 0) {
elementSetupHandlerId_ = g_signal_connect(pipeline_, "element-setup",
G_CALLBACK(elementSetupCallback), this);
}
gst_app_sink_set_caps(GST_APP_SINK(videoSink_), videoCaps);
gst_caps_unref(videoCaps);

Expand Down Expand Up @@ -970,6 +993,7 @@ bool GStreamerVideo::play(const std::string& file) {
pipeLineReady_.store(false, std::memory_order_release);
targetState_.store(IVideo::VideoState::Paused, std::memory_order_release);
dimensions_.store({ -1, -1 }, std::memory_order_release);
audioEnabled_ = true; // play() always enables audio

auto state = asyncState_;

Expand Down Expand Up @@ -1009,7 +1033,199 @@ bool GStreamerVideo::play(const std::string& file) {
return true;
}

GstPadProbeReturn GStreamerVideo::padProbeCallback(GstPad* /*pad*/, GstPadProbeInfo* info, gpointer user_data) {
bool GStreamerVideo::prime(const std::string& file, bool enableAudio) {
if (!initialized_) return false;

LOG_INFO("GStreamerVideo", "Prime start: " + file +
" (audio=" + (enableAudio ? "enabled" : "disabled") + ")");

// 1. Main Thread: Immediate UI/State update
const uint64_t newSessionId = nextUniquePlaySessionId_++;
currentPlaySessionId_.store(newSessionId, std::memory_order_release);

currentFile_ = file;
pipeLineReady_.store(false, std::memory_order_release);
targetState_.store(IVideo::VideoState::Paused, std::memory_order_release);
dimensions_.store({ -1, -1 }, std::memory_order_release);
audioEnabled_ = enableAudio;

auto state = asyncState_;

ThreadPool::getInstance().enqueue([this, file, enableAudio, newSessionId, state]() {
if (currentPlaySessionId_.load(std::memory_order_acquire) != newSessionId) return;

std::lock_guard<std::mutex> lock(state->mutex);
if (!state->alive.load(std::memory_order_acquire) ||
currentPlaySessionId_.load(std::memory_order_acquire) != newSessionId) return;

if (!createPipelineIfNeeded()) return;

// Apply audio flags BEFORE setting URI so the pipeline is built correctly.
gint flags = GST_PLAY_FLAG_VIDEO;
if (enableAudio) flags |= GST_PLAY_FLAG_AUDIO;
flags &= ~GST_PLAY_FLAG_SOFT_VOLUME;
flags &= ~GST_PLAY_FLAG_BUFFERING;
flags &= ~GST_PLAY_FLAG_TEXT;
g_object_set(pipeline_, "flags", flags, nullptr);

// Clear any stale sample from a previous URI
if (GstSample* old = stagedSample_.exchange(nullptr, std::memory_order_acq_rel)) {
gst_sample_unref(old);
}

// Set new URI and begin preroll
gchar* uri = gst_filename_to_uri(file.c_str(), nullptr);
g_object_set(pipeline_, "uri", uri, nullptr);
g_free(uri);

gst_element_set_state(pipeline_, GST_STATE_PAUSED);

// Ensure AudioBus source exists even for audio-disabled slots (for later enable)
if (videoSourceId_ == 0) {
videoSourceId_ = AudioBus::instance().addSource("video-preview");
audioHandle_ = AudioBus::instance().getHandle(videoSourceId_);
}
AudioBus::instance().setGain(audioHandle_, 0.0f);

LOG_INFO("GStreamerVideo", "Prime complete (preroll started): " + file +
" session=" + std::to_string(newSessionId) +
" audio=" + (enableAudio ? "on" : "off"));
});

return true;
}

/**
 * Promote or demote this slot between "center" (audible) and "side" (muted).
 *
 * Side -> Center: schedules a READY->PAUSED cycle on a worker thread so
 * playbin rebuilds the pipeline with the AUDIO flag set (the audio decode
 * chain only exists if the flag is set before preroll). The staged video
 * sample is deliberately kept so the on-screen texture does not go black
 * while re-prerolling.
 *
 * Center -> Side: leaves the pipeline state untouched and simply zeroes the
 * AudioBus gain, silencing output immediately with no state change.
 *
 * @param isCenterMode true if this slot is now the selected/center slot.
 */
void GStreamerVideo::setCenterMode(bool isCenterMode) {
    // audioEnabled_ records the mode the pipeline was last configured for;
    // a matching value means this notification is redundant.
    if (audioEnabled_ == isCenterMode) {
        // No audio-mode change; just log for debugging in case of redundant calls.
        LOG_DEBUG("GStreamerVideo", "setCenterMode(" + std::string(isCenterMode ? "true" : "false") +
            "): no change for " + currentFile_);
        return;
    }

    audioEnabled_ = isCenterMode;

    if (isCenterMode) {
        // Side → Center: re-enable audio and schedule a brief READY→PAUSED cycle
        // so playbin3 builds the audio decode chain.
        // The old texture remains valid during the re-preroll so no black flash occurs.
        LOG_INFO("GStreamerVideo", "setCenterMode: Side->Center for " + currentFile_);

        if (!pipeline_ || currentFile_.empty()) return;

        // Capture the current session id so the queued task becomes a no-op if a
        // newer play()/prime()/hibernate() supersedes this request before it runs.
        const uint64_t sessionId = currentPlaySessionId_.load(std::memory_order_acquire);
        auto state = asyncState_;

        ThreadPool::getInstance().enqueue([this, sessionId, state]() {
            std::lock_guard<std::mutex> lock(state->mutex);
            // Abort if the instance died or was re-targeted since enqueue.
            if (!state->alive.load(std::memory_order_acquire) ||
                currentPlaySessionId_.load(std::memory_order_acquire) != sessionId) return;

            if (!pipeline_) return;

            // Update flags to include audio (same flag policy as play/prime:
            // AudioBus owns volume, no buffering, no text tracks).
            gint flags = GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_AUDIO;
            flags &= ~GST_PLAY_FLAG_SOFT_VOLUME;
            flags &= ~GST_PLAY_FLAG_BUFFERING;
            flags &= ~GST_PLAY_FLAG_TEXT;
            g_object_set(pipeline_, "flags", flags, nullptr);

            // Brief READY → PAUSED cycle to activate the audio decode chain.
            // We deliberately do NOT clear isTextureReady_ or stagedSample_ here so
            // the last video frame continues to display during re-preroll.
            gst_element_set_state(pipeline_, GST_STATE_READY);
            gst_element_set_state(pipeline_, GST_STATE_PAUSED);

            // If resume() was already called before this task ran (targetState_ == Playing),
            // continue immediately to PLAYING so the pipeline doesn't get stuck in PAUSED.
            if (targetState_.load(std::memory_order_acquire) == IVideo::VideoState::Playing) {
                gst_element_set_state(pipeline_, GST_STATE_PLAYING);
            }

            // NOTE(review): currentFile_ (std::string) is read here off the main
            // thread while play()/prime() may reassign it on the main thread —
            // confirm this racy read is benign or snapshot it at enqueue time.
            // NOTE(review): gain is not raised here — presumably the volume ramp
            // for the center slot happens elsewhere (e.g. resume/update path); verify.
            LOG_INFO("GStreamerVideo", "setCenterMode: Audio re-enabled, re-prerolling for " + currentFile_);
        });
    }
    else {
        // Center → Side: immediately silence audio output.
        // Keep the pipeline PLAYING/PAUSED — just zero the gain.
        // NOTE(review): the AUDIO flag stays set, so the muted slot keeps decoding
        // audio until re-primed/hibernated — confirm that cost is intended.
        LOG_INFO("GStreamerVideo", "setCenterMode: Center->Side for " + currentFile_);
        if (audioHandle_) {
            AudioBus::instance().setGain(audioHandle_, 0.0f);
        }
    }
}

/**
 * Park a far-away slot: drop the pipeline to GST_STATE_READY to release decode
 * resources, drain both appsinks, flush the bus, and mute/clear the AudioBus
 * source. The pipeline object itself is kept alive so a later prime()/play()
 * can reuse it. Main-thread state is reset synchronously; the GStreamer
 * teardown runs asynchronously on the thread pool.
 *
 * @return false only if the backend was never initialized.
 */
bool GStreamerVideo::hibernate() {
    if (!initialized_) return false;

    LOG_INFO("GStreamerVideo", "Hibernate: moving to READY and draining for " + currentFile_);

    // 1. Main Thread: Immediate UI/State update (same as unload).
    // A new session id invalidates any queued task from an earlier request.
    const uint64_t sessionId = nextUniquePlaySessionId_++;
    currentPlaySessionId_.store(sessionId, std::memory_order_release);

    targetState_.store(IVideo::VideoState::None, std::memory_order_release);
    isTextureReady_ = false;
    dimensions_.store({ -1, -1 }, std::memory_order_release);
    currentFile_ = "";

    hasError_.store(false, std::memory_order_release);
    hasVideoStream_.store(false, std::memory_order_release);
    loopsFinished_.store(false, std::memory_order_release);

    auto state = asyncState_;

    // 2. Worker thread: the actual GStreamer teardown.
    ThreadPool::getInstance().enqueue([this, sessionId, state]() {
        // Cheap early-out before taking the lock.
        if (currentPlaySessionId_.load(std::memory_order_acquire) != sessionId) return;

        std::lock_guard<std::mutex> lock(state->mutex);
        // Re-check under the lock: instance may have died or been re-targeted.
        if (!state->alive.load(std::memory_order_acquire) ||
            currentPlaySessionId_.load(std::memory_order_acquire) != sessionId) return;

        if (!pipeline_) return;

        // Move to READY to free decode resources
        gst_element_set_state(pipeline_, GST_STATE_READY);

        // Thoroughly drain both appsinks to prevent stale samples on next use
        // (try_pull with timeout 0 returns immediately when empty).
        if (videoSink_ && GST_IS_APP_SINK(videoSink_)) {
            while (GstSample* s = gst_app_sink_try_pull_sample(GST_APP_SINK(videoSink_), 0))
                gst_sample_unref(s);
            while (GstSample* s = gst_app_sink_try_pull_preroll(GST_APP_SINK(videoSink_), 0))
                gst_sample_unref(s);
        }
        if (audioSink_ && GST_IS_APP_SINK(audioSink_)) {
            while (GstSample* s = gst_app_sink_try_pull_sample(GST_APP_SINK(audioSink_), 0))
                gst_sample_unref(s);
            while (GstSample* s = gst_app_sink_try_pull_preroll(GST_APP_SINK(audioSink_), 0))
                gst_sample_unref(s);
        }

        // Clear staged sample (atomically swap out and unref the last frame)
        if (GstSample* old = stagedSample_.exchange(nullptr, std::memory_order_acq_rel)) {
            gst_sample_unref(old);
        }

        // Flush bus to avoid stale message backlog
        // (flush on, then off, so the bus stays usable for the next session).
        if (GstBus* bus = gst_element_get_bus(pipeline_)) {
            gst_bus_set_flushing(bus, TRUE);
            gst_bus_set_flushing(bus, FALSE);
            gst_object_unref(bus);
        }

        // Mute audio output
        // NOTE(review): guarded by videoSourceId_ != 0 here, while setCenterMode
        // guards by audioHandle_ — confirm the two always agree.
        if (videoSourceId_ != 0) {
            AudioBus::instance().setGain(audioHandle_, 0.0f);
            AudioBus::instance().clear(audioHandle_);
        }

        LOG_INFO("GStreamerVideo", "Hibernate complete: pipeline in READY state.");
    });

    return true;
}


auto* ctx = static_cast<CallbackCtx*>(user_data);
if (!ctx || !ctx->state || !ctx->state->alive.load(std::memory_order_acquire)) {
return GST_PAD_PROBE_OK;
Expand Down
15 changes: 15 additions & 0 deletions RetroFE/Source/Video/GStreamerVideo.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,20 @@ class GStreamerVideo final : public IVideo {
void setPerspectiveCorners(const int* corners);
bool hasVideoStream() const { return hasVideoStream_; }

// --- Carousel / crossfade helpers ---
// Prime a slot in PAUSED with optional audio decode.
// enableAudio=false skips audio decode, reducing preroll latency for side/offscreen slots.
bool prime(const std::string& file, bool enableAudio = false);

// Signal center-mode transitions.
// isCenterMode=true (side→center): re-enables audio pipeline and allows volume to ramp.
// isCenterMode=false (center→side): immediately mutes audio output.
void setCenterMode(bool isCenterMode);

// Move pipeline to GST_STATE_READY and drain appsinks. Use for far-away slots
// that are no longer adjacent and should not consume decode/memory resources.
bool hibernate();


bool hasFinishedLoops() const;

Expand Down Expand Up @@ -139,6 +153,7 @@ class GStreamerVideo final : public IVideo {
std::atomic<int> numLoops_{ 0 };
std::string currentFile_{};
float volume_{ 0.0f };
bool audioEnabled_{ true }; // tracks whether the current pipeline was built with audio

// Tracked allocation state to avoid redundant SDL_QueryTexture calls
int allocatedWidth_{ 0 };
Expand Down