Compare commits
No commits in common. '5c97165eb0e87aa656a39feb67a24eb2a77d8c23' and 'df2e9f899dbfaa8b07e89f824ca638fdcc04981b' have entirely different histories.
5c97165eb0
...
df2e9f899d
11 changed files with 2 additions and 600 deletions
@@ -1,34 +0,0 @@
import Effect from './Effect.js';

export default class BlendEffect extends Effect {
    constructor(options = {}) {
        super(options);
        this.setDefaults({
            blendMode: "screen",
            opacity: 1.0
        });
    }

    apply(ctx, video, params = {}) {
        if (!this.enabled) return;

        this.beforeApply(ctx, video, params);

        // Retrieve blend mode and opacity, defaulting to "screen" and 1.0
        const blendMode = this.getParam("blendMode", params.blendMode);
        const opacity = this.getParam("opacity", params.opacity);

        // Set blend mode and opacity
        ctx.globalCompositeOperation = blendMode;
        ctx.globalAlpha = opacity;

        // Draw the video frame onto the canvas
        ctx.drawImage(video, 0, 0, ctx.canvas.width, ctx.canvas.height);

        // Reset to default values for further drawing
        ctx.globalCompositeOperation = "source-over";
        ctx.globalAlpha = 1.0;

        this.afterApply(ctx, video, params);
    }
}
@@ -1,37 +0,0 @@
export default class Effect {
    constructor(options = {}) {
        this.enabled = options.enabled !== undefined ? options.enabled : true;
        this.priority = options.priority || 0;
        this.params = options.params || {};
        this.debug = options.debug || false;
        this.id = options.id || `effect-${Math.random().toString(36).substr(2, 9)}`;
    }

    // Enable or disable the effect
    enable() { this.enabled = true; }
    disable() { this.enabled = false; }

    // Update parameters for live adjustments
    updateParams(newParams = {}) { Object.assign(this.params, newParams); }

    // Set default parameters, to be called in subclasses
    setDefaults(defaultParams) { this.params = { ...defaultParams, ...this.params }; }

    // Log messages if debug mode is enabled
    debugLog(...args) { if (this.debug) console.log(`[${this.constructor.name} - ${this.id}]`, ...args); }

    // Get parameter with fallback
    getParam(key, fallback) { return key in this.params ? this.params[key] : fallback; }

    // Lifecycle hooks for optional custom behavior
    beforeApply(ctx, video, params) { /* Optionally overridden in subclass */ }
    afterApply(ctx, video, params) { /* Optionally overridden in subclass */ }

    // Base apply method to be implemented in each subclass
    apply(ctx, video, params) {
        if (!this.enabled) return;
        this.beforeApply(ctx, video, params);
        throw new Error("apply() method must be implemented by subclass");
        this.afterApply(ctx, video, params);
    }
}
@@ -1,37 +0,0 @@
import Effect from './Effect.js';

export default class ScaleEffect extends Effect {
    constructor(options = {}) {
        super(options);
        this.setDefaults({
            scaleMode: "fit" // Scale mode, e.g., "fit" or "fill"
        });
    }

    apply(ctx, video, params = {}) {
        if (!this.enabled) return;

        this.beforeApply(ctx, video, params);

        const scaleMode = this.getParam("scaleMode", params.scaleMode);

        // Determine the scale to either fit or fill the canvas
        let scale;
        if (scaleMode === "fill") {
            scale = Math.min(ctx.canvas.width / video.videoWidth, ctx.canvas.height / video.videoHeight);
        } else { // Default to "fit" mode
            scale = Math.max(ctx.canvas.width / video.videoWidth, ctx.canvas.height / video.videoHeight);
        }

        // Calculate scaled dimensions and offset for centering
        const scaledWidth = video.videoWidth * scale;
        const scaledHeight = video.videoHeight * scale;
        const offsetX = (ctx.canvas.width - scaledWidth) / 2;
        const offsetY = (ctx.canvas.height - scaledHeight) / 2;

        // Draw the scaled video onto the canvas
        ctx.drawImage(video, offsetX, offsetY, scaledWidth, scaledHeight);

        this.afterApply(ctx, video, params);
    }
}
@@ -1,42 +0,0 @@
// EventEmitter.js
class EventEmitter {
    constructor(debug = false) {
        this.events = {};
        this.debug = debug;
    }

    on(event, listener) {
        if (!this.events[event]) this.events[event] = [];
        this.events[event].push(listener);
    }

    once(event, listener) {
        const wrapper = (...args) => {
            listener(...args);
            this.off(event, wrapper);
        };
        this.on(event, wrapper);
    }

    off(event, listener) {
        if (this.events[event]) {
            this.events[event] = this.events[event].filter(l => l !== listener);
        }
    }

    emit(event, data) {
        if (this.debug) console.log(`Event emitted: ${event}`, data);
        (this.events[event] || []).forEach(listener => listener(data));
        (this.events['*'] || []).forEach(listener => listener(event, data));
    }

    listeners(event) {
        return this.events[event] || [];
    }

    clear() {
        this.events = {};
    }
}

export default EventEmitter;
@@ -1,365 +0,0 @@
# VFX2

.
├── index.html
├── index.js
├── LICENSE
├── package.json
├── package-lock.json
├── README.md
├── script.js
├── styles.css
└── summary.txt

1 directory, 9 files
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>VFX2</title>
    <link rel="stylesheet" href="styles.css">
</head>
<body>
    <div id="visualizer">
        <canvas id="videoCanvas"></canvas>
        <div class="glitch-layer">
            <!-- Add occasional glitch effects here -->
        </div>
    </div>

    <script src="script.js"></script>
</body>
</html>
const express = require('express');
const app = express();
const path = require('path');
const fs = require('fs');

const port = 8000;

const assetsDir = path.join(__dirname, 'assets');

app.use(express.static(path.join(__dirname)));

app.get('/', (req, res) => {
    res.sendFile(path.join(__dirname, 'index.html'));
});

app.get('/api/videos', (req, res) => {
    fs.readdir(assetsDir, (err, files) => {
        if (err) {
            console.error('Error reading directory:', err);
            res.status(500).send('Internal Server Error');
            return;
        }

        const webmFiles = files.filter(file => file.endsWith('.webm'));
        const videoFiles = webmFiles.map(file => ({ src: `assets/${file}` }));

        res.json(videoFiles);
    });
});

app.listen(port, () => {
    console.log('Server started on port', port);
});
// Define a debug mode variable
let debugMode = true

const canvas = document.getElementById("videoCanvas")
const ctx = canvas.getContext("2d")

let videosToLoad = 2
const maxVideosToLoad = 4
const videos = []
let videosLoaded = 0

let aspectRatio = 1
let cachedVideos = []

let showText = true

const parameterStore = {
    blendModeParams: {
        mode: "screen",
        opacity: 1,
    },
    filterParams: {
        grayscale: 0,
        blur: 0,
    },
    transformParams: {
        scale: 1,
        // Additional parameters specific to the tile mode or other transformation
        tilePositionX: 0, // X-coordinate for positioning the tiled video
        tilePositionY: 0, // Y-coordinate for positioning the tiled video
        tileScaleX: 1, // Scale factor along the X-axis for the tiled video
        tileScaleY: 1, // Scale factor along the Y-axis for the tiled video
    },
    presets: {
        default: {
            blendModeParams: { mode: "screen", opacity: 1 },
        },
        sepia: {
            blendModeParams: { mode: "overlay", opacity: 0.5 },
        },
        tile: {
            tilePositionX: 0,
            tilePositionY: 0,
            tileScaleX: 1,
            tileScaleY: 1,
        },
        // Add more presets as needed
    },
    selectedPreset: "default",
};
function applyPreset(presetName) {
    const preset = parameterStore.presets[presetName];
    if (preset) {
        for (const [category, categoryParams] of Object.entries(preset)) {
            // Check if the category exists in the parameter store
            if (parameterStore.hasOwnProperty(category)) {
                // Iterate over each parameter in the category
                for (const [paramKey, paramValue] of Object.entries(categoryParams)) {
                    // Update the corresponding parameter in the parameter store
                    parameterStore[category][paramKey] = paramValue;
                }
            } else {
                console.error(`Category "${category}" not found in parameter store.`);
            }
        }
        // Update selected preset
        parameterStore.selectedPreset = presetName;
    } else {
        console.error(`Preset "${presetName}" not found.`);
    }
}

function debugLog(...args) {
    if (debugMode) {
        console.log(...args)
    }
}

function loadVideos(num) {
    debugLog(`Loading ${num} videos`)
    if (num < 0) {
        num = Math.max(num, -Math.abs(maxVideosToLoad))
    } else {
        num = Math.min(num, Math.abs(maxVideosToLoad))
    }
    while (num > 0) {
        let video = createVideoElement()
        video.addEventListener("loadedmetadata", handleVideoLoaded)
        video.addEventListener("ended", handleVideoEnded)
        video.addEventListener("error", handleVideoEnded)
        video.src = selectRandomVideo(video)
        videos.push(video)
        num--
    }
    while (num < 0) {
        let video = videos.pop()
        video.removeEventListener("loadedmetadata", handleVideoLoaded)
        video.removeEventListener("ended", handleVideoEnded)
        video.removeEventListener("error", handleVideoEnded)
        num++
    }
}
function createVideoElement() {
    const video = document.createElement("video")
    video.autoplay = true
    video.muted = true
    return video
}

function handleVideoLoaded() {
    videosLoaded++
    if (videosLoaded === videos.length) {
        updateCanvasSize()
        drawVideos()
    }
}

function handleVideoEnded(event) {
    const video = event.target
    //debugLog(video,event)
    selectRandomVideo(video)
}

function drawVideos() {
    ctx.clearRect(0, 0, canvas.width, canvas.height)

    videos.forEach((video, i) => {
        // Calculate scaling factor to fit the canvas
        const scale = Math.max(canvas.width / video.videoWidth, canvas.height / video.videoHeight)

        // Calculate scaled dimensions
        const scaledWidth = video.videoWidth * scale
        const scaledHeight = video.videoHeight * scale

        // Calculate horizontal and vertical centering offset
        const offsetX = (canvas.width - scaledWidth) / 2
        const offsetY = (canvas.height - scaledHeight) / 2

        // Use default blend mode for first video (i=0), repeat the rest of blendModes
        if (i === 0) {
            ctx.globalCompositeOperation = "source-over"
        } else {
            // Using values from parameterStore
            ctx.globalCompositeOperation = parameterStore.blendModeParams.mode
            ctx.globalAlpha = parameterStore.blendModeParams.opacity
        }
        video.globalCompositeOperation = ctx.globalCompositeOperation || "default"

        // Draw the current video frame onto the canvas
        ctx.drawImage(video, offsetX, offsetY, scaledWidth, scaledHeight)

        // Reset composite operation
        ctx.globalCompositeOperation = "source-over"
    })

    // Get pixel data
    let imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);

    // If showText is true, draw debug text
    if (showText) {
        drawDebugText()
    }

    // Request next frame
    requestAnimationFrame(drawVideos)
}
function drawDebugText() {
    // Set font style and color
    ctx.font = "16px Arial"
    ctx.fillStyle = "white"
    let padding = 20

    // Calculate the position for the active preset text at the top center of the canvas
    const presetText = `${parameterStore.selectedPreset}`;
    const textWidth = ctx.measureText(presetText).width;
    const centerX = canvas.width / 2;
    const positionX = centerX - textWidth / 2;
    const positionY = padding;

    // Set text alignment to center horizontally
    ctx.textAlign = "center";
    ctx.textBaseline = "top";

    // Draw the active preset text
    ctx.fillText(`${presetText}`, positionX, positionY);
    ctx.fillText(`${canvas.width}x${canvas.height} (${aspectRatio.toFixed(2)})`, positionX, positionY + 20);

    let corners = [[0, 0], [1, 1], [0, 1], [1, 0]]; // Top-left, bottom-right, bottom-left, top-right

    videos.forEach((video, i) => {
        // Get the corner coordinates for the current video
        const corner = corners[i % corners.length]

        // Calculate the position of the text in the corner
        let positionX = corner[0] ? canvas.width : padding
        let positionY = corner[1] ? canvas.height : padding

        // Adjust position to ensure text is within the canvas bounds
        //positionX = Math.max(0, Math.min(positionX, canvas.width - ctx.measureText(getFilename(video.src)).width))
        positionY = Math.max(0, Math.min(positionY, canvas.height - padding * 3))

        // Set text alignment based on corner
        ctx.textAlign = corner[0] ? "right" : "left"
        ctx.textBaseline = corner[1] ? "bottom" : "top"

        // Draw debug text for the video
        ctx.fillText(getFilename(video.src), positionX, positionY)
        ctx.fillText(`Dimensions: ${video.videoWidth}x${video.videoHeight} ()`, positionX, positionY + 20)
        ctx.fillText(formatTime(video.currentTime) + "/" + formatTime(video.duration), positionX, positionY + 40)
        ctx.fillText(`${video.globalCompositeOperation}`, positionX, positionY + 60)
        // Add more debug information as needed
    })
}
// Function to extract filename from full path
function getFilename(src) {
    const parts = src.split('/')
    return decodeURIComponent(parts[parts.length - 1].replace(/\%20/g, ' '))
}

function formatTime(seconds) {
    const hours = Math.floor(seconds / 3600);
    const minutes = Math.floor((seconds % 3600) / 60);
    const remainingSeconds = Math.floor(seconds % 60);
    return `${(hours ? hours.toString().padStart(2, '0') + ':' : '')}` +
        `${minutes.toString().padStart(2, '0')}:` +
        `${remainingSeconds.toString().padStart(2, '0')}`;
}

function updateCanvasSize() {
    let maxAspectRatio = 0;

    // Calculate the maximum aspect ratio among all videos
    for (let i = 0; i < videos.length; i++) {
        const videoAspectRatio = videos[i].videoWidth / videos[i].videoHeight;
        maxAspectRatio = Math.max(maxAspectRatio, videoAspectRatio);
    }

    canvas.width = window.innerWidth;
    canvas.height = window.innerWidth / maxAspectRatio;
}

window.addEventListener("resize", updateCanvasSize)

function getSourceFiles() {
    debugLog("Getting source files")
    return fetch("/api/videos")
        .then(response => response.json())
        .then(videoFiles => {
            debugLog("Success")
            cachedVideos = videoFiles;
        })
        .catch(error => console.error("Error fetching videos:", error));
}

function selectRandomVideo(videoElement) {
    if (cachedVideos.length) {
        const currentSrc = videoElement.src;
        const filteredVideos = cachedVideos.filter(video => video.src !== currentSrc);
        if (filteredVideos.length > 0) {
            const randomIndex = Math.floor(Math.random() * filteredVideos.length);
            const randomVideo = filteredVideos[randomIndex];
            videoElement.src = randomVideo.src;
        } else {
            debugLog("No other videos available.");
            getSourceFiles().then(() => {
                selectRandomVideo(videoElement)
            })
        }
    } else {
        debugLog("Cache empty, doing new request")
        getSourceFiles().then(() => {
            selectRandomVideo(videoElement)
        })
    }
}

document.addEventListener('keydown', function(event) {
    console.log('keyDown', event)
    if (event.key === 'Enter') {
        showText = !showText
    }
    if (event.key === 'ArrowDown') {
        loadVideos(-1)
    }
    if (event.key === 'ArrowUp') {
        loadVideos(1)
    }
})

getSourceFiles().then(() => {
    loadVideos(videosToLoad)
    updateCanvasSize()
})
@@ -1,74 +0,0 @@
1. Refactor Effects and Transformations into Separate Modules

Current State: Right now, drawVideos() performs various tasks like scaling, positioning, blending, and text overlay, all within the same function.

Suggested Refactor: Break out each effect or transformation (e.g., scaleVideo(), applyBlendMode(), applyFilter(), addDebugText()) into standalone functions or classes. A module-based approach allows each effect to be applied conditionally, stacked, or even reused across different video instances. For example:

javascript
function applyTransformations(video, ctx, params) {
    // Transformation logic based on params (scale, position, etc.)
}
function applyBlend(video, ctx, params) {
    // Blending logic here
}
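The Effect base class and its BlendEffect/ScaleEffect subclasses removed in this diff already follow that shape. A minimal sketch of how a stacked pipeline could replace the body of drawVideos(), assuming those classes; the effects array and renderFrame() are illustrative, not existing code:

javascript
import ScaleEffect from './ScaleEffect.js';
import BlendEffect from './BlendEffect.js';

// Hypothetical pipeline: order effects by priority, then let each one draw itself.
const effects = [new ScaleEffect(), new BlendEffect()];

function renderFrame(ctx, video, params = {}) {
    effects
        .sort((a, b) => a.priority - b.priority)
        .forEach(effect => effect.apply(ctx, video, params));
}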
2. Event-Based State Management

Current State: You use key events and parameter toggles to control playback and load video elements.

Suggested Refactor: Introduce an event system where modules communicate changes (e.g., “video loaded,” “blend mode changed”) through custom events. This approach decouples each part of the visualizer, so new effects or controls can easily subscribe to or emit events without direct dependencies on other modules. This can be done with a simple pub/sub pattern.
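A minimal sketch of that pub/sub wiring, assuming the EventEmitter class removed in this diff; the event name and handler are examples, not events the current code emits:

javascript
import EventEmitter from './EventEmitter.js';

const bus = new EventEmitter();

// A control module reacts to changes without knowing who emitted them.
bus.on('blendModeChanged', mode => {
    parameterStore.blendModeParams.mode = mode;
});

// Elsewhere, e.g. in a keyboard handler:
bus.emit('blendModeChanged', 'overlay');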
3. Preset Loader and Storage

Current State: Presets are hardcoded in parameterStore.

Suggested Refactor: Implement a preset loader that pulls settings from JSON files, enabling user-configurable effects without code changes. This also simplifies future changes to the preset structure, as they’re centralized in external files. You could load presets on-demand and allow users to save new presets in JSON format.

javascript
function loadPresetsFromFile(filePath) {
    fetch(filePath)
        .then(res => res.json())
        .then(data => parameterStore.presets = data)
        .catch(console.error);
}
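For the "save new presets" half of that suggestion, a rough counterpart sketch; the /api/presets endpoint is an assumption, since the current Express server only serves static files and /api/videos:

javascript
function savePreset(name, preset) {
    // Hypothetical endpoint; index.js would need a matching POST route.
    return fetch('/api/presets', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ [name]: preset }),
    }).catch(console.error);
}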
4. Optimize Video Loading with Lazy Loading or Pooled Videos

Current State: loadVideos() loads all videos up to maxVideosToLoad.

Suggested Refactor: Implement lazy loading or pooling to load only the required videos, and recycle elements instead of creating/destroying them each time. This reduces memory usage and loading times, especially for high numbers of videos. For example, maintain a pool of video elements and reuse them as needed, updating only the source URL.

Another approach is to pre-cache metadata (like dimensions and duration) when a video is first loaded, avoiding redundant network calls.
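A possible shape for such a pool, reusing createVideoElement() from script.js; the pool functions themselves are illustrative, not existing code:

javascript
const videoPool = [];

function acquireVideo(src) {
    // Reuse an idle element if one exists, otherwise create a new one.
    const video = videoPool.pop() || createVideoElement();
    video.src = src;
    return video;
}

function releaseVideo(video) {
    video.pause();
    video.removeAttribute('src'); // Release the decoder without destroying the element.
    videoPool.push(video);
}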
5. Asynchronous Asset Preloading

Current State: getSourceFiles() only fetches the list of sources; nothing waits for the video elements themselves to be ready before playback starts.

Suggested Refactor: Use asynchronous preloading with a Promise.all() to wait until all videos are cached before starting playback. This approach can reduce playback delays and ensure smoother transitions between videos:

javascript
async function preloadVideos() {
    // Assumes getSourceFiles() is updated to return the fetched file list
    // (currently it only stores the result in cachedVideos).
    const videoFiles = await getSourceFiles();
    await Promise.all(videoFiles.map(file => new Promise(resolve => {
        const video = document.createElement("video");
        video.src = file.src;
        video.onloadeddata = () => resolve(video);
    })));
    // Videos are now preloaded
}
6. Configuration-Driven Design

Current State: Parameters and configurations are hardcoded.

Suggested Refactor: Move configurable parameters (like videosToLoad, blend modes, etc.) to a centralized configuration file (e.g., config.json). This allows you to modify settings without touching code and opens up the possibility for user-customized configurations down the line.
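One way this might look, mirroring the preset loader above; config.json and its keys are assumptions, not a file that exists in this tree:

javascript
const config = {
    videosToLoad: 2,        // Built-in defaults, overridden by config.json when present.
    maxVideosToLoad: 4,
    defaultBlendMode: "screen",
};

async function loadConfig() {
    try {
        const overrides = await fetch("config.json").then(res => res.json());
        Object.assign(config, overrides);
    } catch (err) {
        console.warn("config.json not loaded, using defaults:", err);
    }
}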
7. Separate Rendering Logic into a Renderer Class

Current State: The rendering logic in drawVideos() mixes canvas drawing with UI and blending logic.

Suggested Refactor: Create a dedicated Renderer class that handles rendering tasks, such as updating the canvas size, drawing each video, and applying effects. This keeps rendering independent of state changes, making the code more modular and allowing the Renderer to be swapped out or modified independently of other systems.
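A bare-bones sketch of such a class; it only regroups logic already present in drawVideos() and updateCanvasSize(), and the constructor signature is an assumption:

javascript
class Renderer {
    constructor(canvas, effects = []) {
        this.canvas = canvas;
        this.ctx = canvas.getContext("2d");
        this.effects = effects;
    }

    resize(width, height) {
        this.canvas.width = width;
        this.canvas.height = height;
    }

    renderFrame(videos, params = {}) {
        this.ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);
        // Delegate per-video drawing to the effect pipeline.
        videos.forEach(video => {
            this.effects.forEach(effect => effect.apply(this.ctx, video, params));
        });
        requestAnimationFrame(() => this.renderFrame(videos, params));
    }
}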
8. Implement Dependency Injection for Testing

Current State: Dependencies like parameterStore and ctx are used directly.

Suggested Refactor: Use dependency injection to pass in these dependencies, enabling isolated testing of each module. For example, applyBlendMode(video, ctx, params) could take ctx and params as arguments, making it easier to test blending logic independently.
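In practice that might look like the sketch below; applyBlendMode is the hypothetical function named above, and the stub context only stands in for a real CanvasRenderingContext2D:

javascript
function applyBlendMode(video, ctx, params) {
    ctx.globalCompositeOperation = params.mode;
    ctx.globalAlpha = params.opacity;
    ctx.drawImage(video, 0, 0, ctx.canvas.width, ctx.canvas.height);
}

// A test can inject a stub ctx instead of a real canvas context.
const drawCalls = [];
const fakeCtx = {
    canvas: { width: 640, height: 360 },
    drawImage: (...args) => drawCalls.push(args),
};
applyBlendMode({}, fakeCtx, { mode: "screen", opacity: 0.5 });
console.assert(fakeCtx.globalCompositeOperation === "screen" && drawCalls.length === 1);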
9. Use requestIdleCallback for Non-Urgent Tasks

Current State: Some tasks, like updating debug text or checking video state, run continuously.

Suggested Refactor: For tasks that don’t need to run every frame, consider requestIdleCallback, which allows the browser to perform non-urgent tasks during idle time. This approach optimizes CPU usage and can improve frame rates.
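A small sketch of deferring the debug overlay this way; the setTimeout fallback covers browsers without requestIdleCallback, and the wiring is illustrative rather than a drop-in change to drawVideos():

javascript
const scheduleIdle = window.requestIdleCallback
    ? cb => window.requestIdleCallback(cb)
    : cb => setTimeout(cb, 200);

function scheduleDebugText() {
    scheduleIdle(() => {
        if (showText) drawDebugText();
        scheduleDebugText(); // Re-queue for the next idle period.
    });
}

scheduleDebugText();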
These adjustments will simplify future expansion, reduce memory and CPU usage, and improve readability and maintainability, making the project easier to evolve over time.