Commit e864e26
Parent: a54215e

upgrade dependencies

Changed files:
- package-lock.json +38 -38
- package.json +3 -3
- src/app/api/actions/ai-tube-hf/parseChannel.ts +5 -4
- src/app/api/actions/ai-tube-hf/uploadVideoRequestToDataset.ts +5 -2
- src/app/api/actions/submitVideoRequest.ts +4 -2
- src/app/api/generators/search/defaultChannel.ts +3 -2
- src/app/api/generators/search/getNewMediaInfo.ts +2 -1
- src/app/api/generators/search/searchResultToMediaInfo.ts +5 -7
- src/app/api/parsers/parseDatasetPrompt.ts +4 -4
- src/app/api/parsers/parseDatasetReadme.ts +6 -4
- src/app/api/parsers/parseVideoOrientation.ts +0 -32
- src/app/api/utils/computeOrientationProjectionWidthHeight.ts +4 -4
- src/app/api/v1/create/index.ts +3 -3
- src/app/api/v1/edit/entities/clapToLatentStory.ts +4 -4
- src/app/api/v1/edit/storyboards/processShot.ts +6 -4
- src/app/config.ts +1 -2
- src/app/views/user-channel-view/index.tsx +7 -7
- src/components/interface/latent-engine/core/useLatentEngine.ts +2 -2
- src/components/interface/latent-engine/resolvers/resolveSegment.ts +4 -4
- src/types/general.ts +6 -11
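
Apart from the version bumps in package.json and package-lock.json, the source diffs below all follow one pattern: the local orientation type/parser and raw category strings are replaced by enums and helpers now exported by @aitube/clap. A minimal sketch of the new call style, using only identifiers that appear in the diffs (an illustration, not code from the repository):

import {
  ClapMediaOrientation,
  ClapSegmentCategory,
  defaultMediaOrientation,
  parseMediaOrientation,
} from "@aitube/clap"

// free-form metadata strings are normalized into the library enum,
// with the library default used as the fallback
const orientation: ClapMediaOrientation = parseMediaOrientation("vertical", defaultMediaOrientation)

// segment kinds are compared against enum members instead of string literals
declare const segmentCategory: ClapSegmentCategory // hypothetical stand-in for segment.category
const isStoryboard = segmentCategory === ClapSegmentCategory.STORYBOARD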
package-lock.json (CHANGED)

@@ -8,9 +8,9 @@
     "name": "@aitube/website",
     "version": "0.0.0",
     "dependencies": {
-      "@aitube/clap": "0.0.
-      "@aitube/client": "0.0.
-      "@aitube/engine": "0.0.
+      "@aitube/clap": "0.0.14",
+      "@aitube/client": "0.0.17",
+      "@aitube/engine": "0.0.4",
       "@huggingface/hub": "0.12.3-oauth",
       "@huggingface/inference": "^2.6.7",
       "@jcoreio/async-throttle": "^1.6.0",
@@ -117,9 +117,9 @@
       }
     },
     "node_modules/@aitube/clap": {
-      "version": "0.0.
-      "resolved": "https://registry.npmjs.org/@aitube/clap/-/clap-0.0.
-      "integrity": "sha512-
+      "version": "0.0.14",
+      "resolved": "https://registry.npmjs.org/@aitube/clap/-/clap-0.0.14.tgz",
+      "integrity": "sha512-i4mq3YFecWVOTS/p5QaSQ0VJfurKXlyRc8FJMqKI6P/7rpf4vE4IL+jBKa4HPsYeNt85/KOt3MJKEFVtgiWGfQ==",
       "dependencies": {
         "pure-uuid": "^1.8.1",
         "yaml": "^2.4.1"
@@ -129,22 +129,22 @@
       }
     },
     "node_modules/@aitube/client": {
-      "version": "0.0.
-      "resolved": "https://registry.npmjs.org/@aitube/client/-/client-0.0.
-      "integrity": "sha512-
+      "version": "0.0.17",
+      "resolved": "https://registry.npmjs.org/@aitube/client/-/client-0.0.17.tgz",
+      "integrity": "sha512-waRA1k2pqKI7uOXUnBs6y056JY2h7LO+kzKDcHBiNSAyC0ZvSvP7VqTia2fxpF99rik6HgmC3N0AslObP4T6Zw==",
       "dependencies": {
         "query-string": "^9.0.0"
       },
       "peerDependencies": {
-        "@aitube/clap": "0.0.
+        "@aitube/clap": "0.0.14"
       }
     },
     "node_modules/@aitube/engine": {
-      "version": "0.0.
-      "resolved": "https://registry.npmjs.org/@aitube/engine/-/engine-0.0.
-      "integrity": "sha512-
+      "version": "0.0.4",
+      "resolved": "https://registry.npmjs.org/@aitube/engine/-/engine-0.0.4.tgz",
+      "integrity": "sha512-YPVoW9u48PB/UW9PXxkz0nvSiCWQEEPr9mZyx/IRzQdTqC1woM/3p5m79cjoCuo9yT4pw1WIr0jR+9+6xwaW7Q==",
       "peerDependencies": {
-        "@aitube/clap": "0.0.
+        "@aitube/clap": "0.0.14"
       }
     },
     "node_modules/@alloc/quick-lru": {
@@ -3176,9 +3176,9 @@
       }
     },
     "node_modules/@upstash/redis": {
-      "version": "1.30.
-      "resolved": "https://registry.npmjs.org/@upstash/redis/-/redis-1.30.
-      "integrity": "sha512-
+      "version": "1.30.1",
+      "resolved": "https://registry.npmjs.org/@upstash/redis/-/redis-1.30.1.tgz",
+      "integrity": "sha512-Cmk2cvm1AcD6mKLg/UFhQDzM+H1HsX/k5ufvNL4Kii8DsMTKmadMJ1rRZEGQ/SM7H51EeOL/YSa6K2EPc1SYPA==",
       "dependencies": {
         "crypto-js": "^4.2.0"
       }
@@ -3941,9 +3941,9 @@
       }
     },
     "node_modules/cookies-next/node_modules/@types/node": {
-      "version": "16.18.
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.
-      "integrity": "sha512-
+      "version": "16.18.97",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.97.tgz",
+      "integrity": "sha512-4muilE1Lbfn57unR+/nT9AFjWk0MtWi5muwCEJqnOvfRQDbSfLCUdN7vCIg8TYuaANfhLOV85ve+FNpiUsbSRg=="
     },
     "node_modules/copy-to-clipboard": {
       "version": "3.3.3",
@@ -4317,9 +4317,9 @@
       "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
     },
     "node_modules/electron-to-chromium": {
-      "version": "1.4.
-      "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.
-      "integrity": "sha512-
+      "version": "1.4.757",
+      "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.757.tgz",
+      "integrity": "sha512-jftDaCknYSSt/+KKeXzH3LX5E2CvRLm75P3Hj+J/dv3CL0qUYcOt13d5FN1NiL5IJbbhzHrb3BomeG2tkSlZmw=="
     },
     "node_modules/elliptic": {
       "version": "6.5.4",
@@ -4346,9 +4346,9 @@
       "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
     },
     "node_modules/enhanced-resolve": {
-      "version": "5.16.
-      "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.
-      "integrity": "sha512-
+      "version": "5.16.1",
+      "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.1.tgz",
+      "integrity": "sha512-4U5pNsuDl0EhuZpq46M5xPslstkviJuhrdobaRDBk2Jy2KO37FDAJl4lb2KlNabxT0m4MTK2UHNrsAcphE8nyw==",
       "dependencies": {
         "graceful-fs": "^4.2.4",
         "tapable": "^2.2.0"
@@ -5355,9 +5355,9 @@
       }
     },
     "node_modules/get-tsconfig": {
-      "version": "4.7.
-      "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.
-      "integrity": "sha512-
+      "version": "4.7.4",
+      "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.4.tgz",
+      "integrity": "sha512-ofbkKj+0pjXjhejr007J/fLf+sW+8H7K5GCm+msC8q3IpvgjobpyPqSRFemNyIMxklC0zeJpi7VDFna19FacvQ==",
       "dependencies": {
         "resolve-pkg-maps": "^1.0.0"
       },
@@ -6688,9 +6688,9 @@
       }
     },
     "node_modules/openai": {
-      "version": "4.
-      "resolved": "https://registry.npmjs.org/openai/-/openai-4.
-      "integrity": "sha512-
+      "version": "4.42.0",
+      "resolved": "https://registry.npmjs.org/openai/-/openai-4.42.0.tgz",
+      "integrity": "sha512-xbiQQ2YNqdkE6cHqeWKa7lsAvdYfgp84XiNFOVkAMa6+9KpmOL4hCWCRR6e6I/clpaens/T9XeLVtyC5StXoRw==",
       "dependencies": {
         "@types/node": "^18.11.18",
         "@types/node-fetch": "^2.6.4",
@@ -6706,9 +6706,9 @@
       }
     },
     "node_modules/openai/node_modules/@types/node": {
-      "version": "18.19.
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.
-      "integrity": "sha512-
+      "version": "18.19.32",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.32.tgz",
+      "integrity": "sha512-2bkg93YBSDKk8DLmmHnmj/Rwr18TLx7/n+I23BigFwgexUJoMHZOd8X1OFxuF/W3NN0S2W2E5sVabI5CPinNvA==",
       "dependencies": {
         "undici-types": "~5.26.4"
       }
@@ -8307,9 +8307,9 @@
       }
     },
     "node_modules/type-fest": {
-      "version": "4.18.
-      "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.18.
-      "integrity": "sha512
+      "version": "4.18.2",
+      "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.18.2.tgz",
+      "integrity": "sha512-+suCYpfJLAe4OXS6+PPXjW3urOS4IoP9waSiLuXfLgqZODKw/aWwASvzqE886wA0kQgGy0mIWyhd87VpqIy6Xg==",
       "engines": {
         "node": ">=16"
       },
package.json (CHANGED)

@@ -10,9 +10,9 @@
     "lint": "next lint"
   },
   "dependencies": {
-    "@aitube/clap": "0.0.
-    "@aitube/client": "0.0.
-    "@aitube/engine": "0.0.
+    "@aitube/clap": "0.0.14",
+    "@aitube/client": "0.0.17",
+    "@aitube/engine": "0.0.4",
     "@huggingface/hub": "0.12.3-oauth",
     "@huggingface/inference": "^2.6.7",
    "@jcoreio/async-throttle": "^1.6.0",
src/app/api/actions/ai-tube-hf/parseChannel.ts (CHANGED)

@@ -1,11 +1,12 @@
 "use server"
 
+import { ClapMediaOrientation, defaultMediaOrientation } from "@aitube/clap"
 import { Credentials, downloadFile, whoAmI } from "@/lib/huggingface/hub/src"
 import { parseDatasetReadme } from "@/app/api/parsers/parseDatasetReadme"
-import { ChannelInfo, VideoGenerationModel
+import { ChannelInfo, VideoGenerationModel } from "@/types/general"
+import { defaultVideoModel } from "@/app/config"
 
 import { adminCredentials } from "../config"
-import { defaultVideoModel, defaultVideoOrientation } from "@/app/config"
 
 export async function parseChannel(options: {
   id: string
@@ -78,7 +79,7 @@ export async function parseChannel(options: {
   let voice = ""
   let music = ""
   let tags: string[] = []
-  let orientation:
+  let orientation: ClapMediaOrientation = defaultMediaOrientation
 
   // console.log(`going to read datasets/${name}`)
   try {
@@ -102,7 +103,7 @@ export async function parseChannel(options: {
     style = parsedDatasetReadme.style || ""
     voice = parsedDatasetReadme.voice || ""
    music = parsedDatasetReadme.music || ""
-    orientation = parsedDatasetReadme.orientation ||
+    orientation = parsedDatasetReadme.orientation || defaultMediaOrientation
 
     thumbnail =
       thumbnail.startsWith("http")
src/app/api/actions/ai-tube-hf/uploadVideoRequestToDataset.ts (CHANGED)

@@ -2,8 +2,11 @@
 
 import { Blob } from "buffer"
 
+import { ClapMediaOrientation } from "@aitube/clap"
+
 import { Credentials, uploadFile, whoAmI } from "@/lib/huggingface/hub/src"
-import { ChannelInfo, VideoGenerationModel, MediaInfo,
+import { ChannelInfo, VideoGenerationModel, MediaInfo, VideoRequest } from "@/types/general"
+
 import { formatPromptFileName } from "../../utils/formatPromptFileName"
 import { computeOrientationProjectionWidthHeight } from "../../utils/computeOrientationProjectionWidthHeight"
 
@@ -38,7 +41,7 @@ export async function uploadVideoRequestToDataset({
   music: string
   tags: string[]
   duration: number
-  orientation:
+  orientation: ClapMediaOrientation
 }): Promise<{
   videoRequest: VideoRequest
   videoInfo: MediaInfo
src/app/api/actions/submitVideoRequest.ts (CHANGED)

@@ -1,6 +1,8 @@
 "use server"
 
-import {
+import { ClapMediaOrientation } from "@aitube/clap"
+
+import { ChannelInfo, VideoGenerationModel, MediaInfo } from "@/types/general"
 
 import { uploadVideoRequestToDataset } from "./ai-tube-hf/uploadVideoRequestToDataset"
 
@@ -31,7 +33,7 @@ export async function submitVideoRequest({
   music: string
   tags: string[]
   duration: number
-  orientation:
+  orientation: ClapMediaOrientation
 }): Promise<MediaInfo> {
   if (!apiKey) {
     throw new Error(`the apiKey is required`)
src/app/api/generators/search/defaultChannel.ts (CHANGED)

@@ -1,4 +1,5 @@
-import { ChannelInfo } from "@/types/general"
+import { ChannelInfo } from "@/types/general"
+import { defaultMediaOrientation } from "@aitube/clap"
 
 export const defaultChannel: ChannelInfo = {
   /**
@@ -66,5 +67,5 @@ export const defaultChannel: ChannelInfo = {
   /**
    * Default video orientation
    */
-  orientation:
+  orientation: defaultMediaOrientation
 }
src/app/api/generators/search/getNewMediaInfo.ts (CHANGED)

@@ -5,6 +5,7 @@ import {
   MediaInfo,
 } from "@/types/general"
 import { defaultChannel } from "./defaultChannel"
+import { defaultMediaOrientation } from "@aitube/clap"
 
 export function getNewMediaInfo(params: Partial<MediaInfo> = {}): MediaInfo {
 
@@ -132,7 +133,7 @@ export function getNewMediaInfo(params: Partial<MediaInfo> = {}): MediaInfo {
     /**
      * General media aspect ratio
      */
-    orientation:
+    orientation: defaultMediaOrientation,
 
     /**
      * Media projection (cartesian by default)
src/app/api/generators/search/searchResultToMediaInfo.ts (CHANGED)

@@ -3,15 +3,13 @@ import { v4 as uuidv4 } from "uuid"
 import {
   ChannelInfo,
   MediaInfo,
-  VideoStatus,
-  VideoGenerationModel,
-  MediaProjection,
-  VideoOrientation
 } from "@/types/general"
 
-import { LatentSearchResult, LatentSearchResults } from "./types"
 import { newRender } from "../../providers/videochain/renderWithVideoChain"
 
+import { LatentSearchResult } from "./types"
+import { defaultMediaOrientation } from "@aitube/clap"
+
 const channel: ChannelInfo = {
   /**
    * We actually use the dataset ID for the channel ID.
@@ -78,7 +76,7 @@ const channel: ChannelInfo = {
   /**
    * Default video orientation
    */
-  orientation:
+  orientation: defaultMediaOrientation
 }
 
 export async function searchResultToMediaInfo(searchResult: LatentSearchResult): Promise<MediaInfo> {
@@ -220,7 +218,7 @@ export async function searchResultToMediaInfo(searchResult: LatentSearchResult):
     /**
      * General media aspect ratio
      */
-    orientation:
+    orientation: defaultMediaOrientation,
 
     /**
      * Media projection (cartesian by default)
src/app/api/parsers/parseDatasetPrompt.ts (CHANGED)

@@ -1,8 +1,8 @@
+import { parseMediaOrientation, defaultMediaOrientation } from "@aitube/clap"
 
 import { ChannelInfo, ParsedDatasetPrompt } from "@/types/general"
 import { parseVideoModelName } from "./parseVideoModelName"
-import {
-import { defaultVideoModel, defaultVideoOrientation } from "@/app/config"
+import { defaultVideoModel } from "@/app/config"
 
 export function parseDatasetPrompt(markdown: string, channel: ChannelInfo): ParsedDatasetPrompt {
   try {
@@ -36,7 +36,7 @@ export function parseDatasetPrompt(markdown: string, channel: ChannelInfo): Pars
       thumbnail: typeof thumbnail === "string" && thumbnail ? thumbnail : "",
       voice: typeof voice === "string" && voice ? voice : (channel.voice || ""),
       music: typeof music === "string" && music ? music : (channel.music || ""),
-      orientation:
+      orientation: parseMediaOrientation(orientation, channel.orientation),
     }
   } catch (err) {
     return {
@@ -50,7 +50,7 @@ export function parseDatasetPrompt(markdown: string, channel: ChannelInfo): Pars
       thumbnail: "",
      voice: channel.voice || "",
       music: channel.music || "",
-      orientation: channel.orientation ||
+      orientation: channel.orientation || defaultMediaOrientation,
     }
   }
 }
src/app/api/parsers/parseDatasetReadme.ts (CHANGED)

@@ -1,10 +1,12 @@
 
 import metadataParser from "markdown-yaml-metadata-parser"
+import { defaultMediaOrientation, parseMediaOrientation } from "@aitube/clap"
 
 import { ParsedDatasetReadme, ParsedMetadataAndContent } from "@/types/general"
+import { defaultVideoModel } from "@/app/config"
+
 import { parseVideoModelName } from "./parseVideoModelName"
-
-import { defaultVideoModel, defaultVideoOrientation } from "@/app/config"
+
 
 export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
   try {
@@ -29,7 +31,7 @@ export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
       music,
       description,
       prompt,
-      orientation:
+      orientation: parseMediaOrientation(orientation, defaultMediaOrientation),
     }
   } catch (err) {
     return {
@@ -45,7 +47,7 @@ export function parseDatasetReadme(markdown: string = ""): ParsedDatasetReadme {
       music: "",
       description: "",
       prompt: "",
-      orientation:
+      orientation: defaultMediaOrientation,
     }
   }
 }
src/app/api/parsers/parseVideoOrientation.ts (DELETED)

@@ -1,32 +0,0 @@
-import { defaultVideoOrientation } from "@/app/config"
-import { VideoOrientation } from "@/types/general"
-
-export function parseVideoOrientation(text: any, defaultToUse?: VideoOrientation): VideoOrientation {
-  const rawOrientationString = `${text || ""}`.trim().toLowerCase()
-
-  let orientation: VideoOrientation = defaultToUse || (defaultVideoOrientation || "landscape")
-
-  if (
-    rawOrientationString === "landscape" ||
-    rawOrientationString === "horizontal"
-  ) {
-    orientation = "landscape"
-  }
-
-  if (
-    rawOrientationString === "portrait" ||
-    rawOrientationString === "vertical" ||
-    rawOrientationString === "mobile"
-  ) {
-    orientation = "portrait"
-  }
-
-  if (
-    rawOrientationString === "square"
-  ) {
-    orientation = "square"
-  }
-
-
-  return orientation
-}
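
The deleted helper above is superseded by parseMediaOrientation from @aitube/clap. A sketch of how a call site migrates, assuming the library helper covers the same landscape / portrait / square mapping as the removed function (its actual implementation lives in @aitube/clap and is not part of this commit):

// before: local type and parser
// import { VideoOrientation } from "@/types/general"
// import { parseVideoOrientation } from "@/app/api/parsers/parseVideoOrientation"
// const orientation: VideoOrientation = parseVideoOrientation(rawValue, "landscape")

// after: library type and parser, as used elsewhere in this commit
import { ClapMediaOrientation, defaultMediaOrientation, parseMediaOrientation } from "@aitube/clap"

const rawValue = "vertical" // hypothetical metadata value
const orientation: ClapMediaOrientation = parseMediaOrientation(rawValue, defaultMediaOrientation)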
src/app/api/utils/computeOrientationProjectionWidthHeight.ts (CHANGED)

@@ -1,7 +1,7 @@
-import {
+import { MediaProjection } from "@/types/general"
 
-import { parseVideoOrientation } from "../parsers/parseVideoOrientation"
 import { parseProjectionFromLoRA } from "../parsers/parseProjectionFromLoRA"
+import { ClapMediaOrientation, parseMediaOrientation } from "@aitube/clap"
 
 export function computeOrientationProjectionWidthHeight({
   lora: maybeLora,
@@ -12,14 +12,14 @@ export function computeOrientationProjectionWidthHeight({
   projection?: any
   orientation?: any
 }): {
-  orientation:
+  orientation: ClapMediaOrientation
   projection: MediaProjection
   width: number
   height: number
 } {
 
   const lora = `${maybeLora || ""}`
-  const orientation =
+  const orientation = parseMediaOrientation(maybeOrientation)
   const projection = maybeProjection ? maybeProjection : parseProjectionFromLoRA(lora)
 
   let width = 1024
src/app/api/v1/create/index.ts (CHANGED)

@@ -1,6 +1,6 @@
 "use server"
 
-import { ClapProject, getValidNumber, newClap, newSegment } from "@aitube/clap"
+import { ClapProject, getValidNumber, newClap, newSegment, ClapSegmentCategory, ClapOutputType } from "@aitube/clap"
 
 import { sleep } from "@/lib/utils/sleep"
 import { predict } from "@/app/api/providers/huggingface/predictWithHuggingFace"
@@ -133,9 +133,9 @@ Output: `
       track: 1,
       startTimeInMs: currentElapsedTimeInMs,
       assetDurationInMs: defaultSegmentDurationInMs,
-      category:
+      category: ClapSegmentCategory.STORYBOARD,
       prompt: image,
-      outputType:
+      outputType: ClapOutputType.IMAGE,
     }))
 
     clap.segments.push(newSegment({
src/app/api/v1/edit/entities/clapToLatentStory.ts (CHANGED)

@@ -1,4 +1,4 @@
-import { ClapProject, ClapSegmentFilteringMode, filterSegments } from "@aitube/clap"
+import { ClapProject, ClapSegmentCategory, ClapSegmentFilteringMode, filterSegments } from "@aitube/clap"
 
 import { LatentStory } from "@/app/api/v1/types"
 
@@ -20,21 +20,21 @@ export async function clapToLatentStory(clap: ClapProject): Promise<LatentStory[
     ClapSegmentFilteringMode.START,
     shot,
     clap.segments,
-
+    ClapSegmentCategory.STORYBOARD
   ).at(0)
 
   const title = filterSegments(
     ClapSegmentFilteringMode.START,
     shot,
     clap.segments,
-
+    ClapSegmentCategory.INTERFACE
   ).at(0)
 
   const voice = filterSegments(
     ClapSegmentFilteringMode.START,
     shot,
     clap.segments,
-
+    ClapSegmentCategory.DIALOGUE
   ).at(0)
 
   const latentStory: LatentStory = {
src/app/api/v1/edit/storyboards/processShot.ts (CHANGED)

@@ -4,7 +4,9 @@ import {
   getClapAssetSourceType,
   newSegment,
   filterSegments,
-  ClapSegmentFilteringMode
+  ClapSegmentFilteringMode,
+  ClapSegmentCategory,
+  ClapOutputType
 } from "@aitube/clap"
 import { ClapCompletionMode } from "@aitube/client"
 import { getVideoPrompt } from "@aitube/engine"
@@ -32,7 +34,7 @@ export async function processShot({
   )
 
   const shotStoryboardSegments: ClapSegment[] = shotSegments.filter(s =>
-    s.category ===
+    s.category === ClapSegmentCategory.STORYBOARD
   )
 
   let shotStoryboardSegment: ClapSegment | undefined = shotStoryboardSegments.at(0)
@@ -44,10 +46,10 @@
       startTimeInMs: shotSegment.startTimeInMs,
       endTimeInMs: shotSegment.endTimeInMs,
       assetDurationInMs: shotSegment.assetDurationInMs,
-      category:
+      category: ClapSegmentCategory.STORYBOARD,
       prompt: "",
      assetUrl: "",
-      outputType:
+      outputType: ClapOutputType.IMAGE,
     })
 
     // we fix the existing clap
src/app/config.ts (CHANGED)

@@ -1,4 +1,4 @@
-import { VideoGenerationModel
+import { VideoGenerationModel } from "@/types/general"
 
 export const showBetaFeatures = `${
   process.env.NEXT_PUBLIC_SHOW_BETA_FEATURES || ""
@@ -6,7 +6,6 @@ export const showBetaFeatures = `${
 
 
 export const defaultVideoModel: VideoGenerationModel = "SVD"
-export const defaultVideoOrientation: VideoOrientation = "landscape"
 export const defaultVoice = "Julian"
 
 export const developerMode = `${
src/app/views/user-channel-view/index.tsx (CHANGED)

@@ -2,11 +2,12 @@
 
 import { useEffect, useState, useTransition } from "react"
 
+import { defaultMediaOrientation, parseMediaOrientation } from "@aitube/clap"
+import { useLocalStorage } from "usehooks-ts"
+
 import { useStore } from "@/app/state/useStore"
 import { cn } from "@/lib/utils/cn"
 import { VideoGenerationModel, MediaInfo } from "@/types/general"
-
-import { useLocalStorage } from "usehooks-ts"
 import { localStorageKeys } from "@/app/state/localStorageKeys"
 import { defaultSettings } from "@/app/state/defaultSettings"
 import { Input } from "@/components/ui/input"
@@ -17,8 +18,7 @@ import { PendingVideoList } from "@/components/interface/pending-video-list"
 import { getChannelVideos } from "@/app/api/actions/ai-tube-hf/getChannelVideos"
 import { parseVideoModelName } from "@/app/api/parsers/parseVideoModelName"
 import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
-import { defaultVideoModel,
-import { parseVideoOrientation } from "@/app/api/parsers/parseVideoOrientation"
+import { defaultVideoModel, defaultVoice } from "@/app/config"
 
 export function UserChannelView() {
   const [_isPending, startTransition] = useTransition()
@@ -37,7 +37,7 @@ export function UserChannelView() {
   const [voice, setVoice] = useState(defaultVoice)
   const [music, setMusic] = useState("")
   const [duration, setDuration] = useState(0)
-  const [orientation, setOrientation] = useState(
+  const [orientation, setOrientation] = useState(defaultMediaOrientation)
 
   // we do not include the tags in the list of required fields
   const missingFields = !title || !description || !prompt
@@ -243,9 +243,9 @@ export function UserChannelView() {
           <div className="flex flex-col space-y-2 flex-grow">
             <Select
               onValueChange={(value: string) => {
-                setOrientation(
+                setOrientation(parseMediaOrientation(value, defaultMediaOrientation))
               }}
-              defaultValue={
+              defaultValue={defaultMediaOrientation}>
               <SelectTrigger className="">
                 <SelectValue placeholder="Video orientation" />
               </SelectTrigger>
src/components/interface/latent-engine/core/useLatentEngine.ts (CHANGED)

@@ -1,7 +1,7 @@
 
 import { create } from "zustand"
 
-import { ClapProject, ClapSegment, newClap, parseClap } from "@aitube/clap"
+import { ClapProject, ClapSegment, ClapSegmentCategory, newClap, parseClap } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 
 import { LatentEngineStore } from "./types"
@@ -518,7 +518,7 @@ export const useLatentEngine = create<LatentEngineStore>((set, get) => ({
     // note: for now we only display one panel at a time,
     // later we can try to see if we should handle more
     // for nice gradient transition,
-    const interfaceLayers = await resolveSegments(clap,
+    const interfaceLayers = await resolveSegments(clap, ClapSegmentCategory.INTERFACE, 1)
 
     if (get().isPlaying) {
       set({
src/components/interface/latent-engine/resolvers/resolveSegment.ts (CHANGED)

@@ -1,4 +1,4 @@
-import { ClapProject, ClapSegment } from "@aitube/clap"
+import { ClapProject, ClapSegment, ClapSegmentCategory } from "@aitube/clap"
 
 import { LatentComponentResolver, LayerElement } from "../core/types"
 
@@ -10,11 +10,11 @@ import { resolve as imageResolver } from "./image"
 export async function resolveSegment(segment: ClapSegment, clap: ClapProject): Promise<LayerElement> {
   let latentComponentResolver: LatentComponentResolver = genericResolver
 
-  if (segment.category ===
+  if (segment.category === ClapSegmentCategory.INTERFACE) {
     latentComponentResolver = interfaceResolver
-  } else if (segment.category ===
+  } else if (segment.category === ClapSegmentCategory.VIDEO) {
     latentComponentResolver = videoResolver
-  } else if (segment.category ===
+  } else if (segment.category === ClapSegmentCategory.STORYBOARD) {
     latentComponentResolver = imageResolver
   }
 
src/types/general.ts (CHANGED)

@@ -1,4 +1,4 @@
-
+import type { ClapMediaOrientation } from "@aitube/clap"
 
 export type ProjectionMode = 'cartesian' | 'spherical'
 
@@ -246,7 +246,7 @@ export type ChannelInfo = {
  /**
   * Default video orientation
   */
-  orientation:
+  orientation: ClapMediaOrientation
 }
 
 export type VideoStatus =
@@ -339,7 +339,7 @@ export type VideoRequest = {
  /**
   * Video orientation
   */
-  orientation:
+  orientation: ClapMediaOrientation
 
  /**
   * Video duration
@@ -347,11 +347,6 @@ export type VideoRequest = {
  duration: number
 }
 
-export type VideoOrientation =
-  | "portrait"
-  | "landscape"
-  | "square"
-
 export type MediaProjection =
   | "cartesian" // this is the default
   | "equirectangular"
@@ -492,7 +487,7 @@ export type MediaInfo = {
  /**
   * General media aspect ratio
   */
-  orientation:
+  orientation: ClapMediaOrientation
 
  /**
   * Media projection (cartesian by default)
@@ -665,7 +660,7 @@ export type ParsedDatasetReadme = {
  hf_tags: string[]
  description: string
  prompt: string
-  orientation:
+  orientation: ClapMediaOrientation
 }
 
 export type ParsedMetadataAndContent = {
@@ -688,7 +683,7 @@ export type ParsedDatasetPrompt = {
  thumbnail: string
  voice: string
  music: string
-  orientation:
+  orientation: ClapMediaOrientation
 }
 
 export type UpdateQueueRequest = {