-        {principles.map((principle) => (
-          <div key={principle.name}>
-            <h3>{principle.name}</h3>
-            <p>{principle.description}</p>
-          </div>
+        {features.map((feature) => (
+          <div key={feature.name}>
+            <h3>{feature.name}</h3>
+            <p>{feature.description}</p>
+          </div>
         ))}
diff --git a/src/pages/gpu/GpuPage.tsx b/src/pages/gpu/GpuPage.tsx
index d0a11cb..e8e1d0b 100644
--- a/src/pages/gpu/GpuPage.tsx
+++ b/src/pages/gpu/GpuPage.tsx
@@ -7,6 +7,8 @@ import { GpuUseCases } from './GpuUseCases'
import { GpuGettingStarted } from './GpuGettingStarted'
import { GpuDifferentiators } from './GpuDifferentiators'
import { CallToAction } from './CallToAction'
+import { GpuCapabilities } from './GpuCapabilities'
+import { GpuDesign } from './GpuDesign'
export default function GpuPage() {
return (
@@ -14,24 +16,39 @@ export default function GpuPage() {
       <GpuUseCases />
+      <GpuCapabilities />
+      <GpuDesign />
       <GpuDifferentiators />
       <GpuGettingStarted />
       <CallToAction />
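Only the import hunk of GpuPage.tsx survives cleanly; the body hunk also rewrites several layout lines that are no longer legible, so the resulting composition has to be inferred. A minimal sketch of the updated page, assuming the sections are rendered in roughly this order and omitting whatever wrappers the real file uses:

```tsx
import { GpuUseCases } from './GpuUseCases'
import { GpuGettingStarted } from './GpuGettingStarted'
import { GpuDifferentiators } from './GpuDifferentiators'
import { CallToAction } from './CallToAction'
import { GpuCapabilities } from './GpuCapabilities'
import { GpuDesign } from './GpuDesign'

export default function GpuPage() {
  return (
    <>
      {/* Section order and the fragment wrapper are assumptions; the diff only
          confirms that GpuCapabilities and GpuDesign are now rendered. */}
      <GpuUseCases />
      <GpuCapabilities />
      <GpuDesign />
      <GpuDifferentiators />
      <GpuGettingStarted />
      <CallToAction />
    </>
  )
}
```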
diff --git a/src/pages/gpu/GpuUseCases.tsx b/src/pages/gpu/GpuUseCases.tsx
index a6f1572..5f467b1 100644
--- a/src/pages/gpu/GpuUseCases.tsx
+++ b/src/pages/gpu/GpuUseCases.tsx
@@ -1,34 +1,18 @@
import { Container } from '../../components/Container'
import { Eyebrow, SectionHeader, P } from '../../components/Texts'
-const useCases = [
+const gpuUseCases = [
   {
-    title: 'AI / ML Training',
-    description:
-      'Scale training, fine-tuning, and inference workloads anywhere on the grid.',
-    bullets: ['GPU acceleration', 'Scalable compute', 'Cost optimization'],
+    title: 'AI / ML Training & Inference',
+    description: 'Scale model execution across sovereign GPU nodes.',
   },
   {
     title: 'Rendering & Visualization',
-    description:
-      'Drive high-performance graphics pipelines for media, science, and immersive experiences.',
-    bullets: [
-      'Distributed 3D rendering',
-      'Scientific visualization',
-      'Real-time VR / AR processing',
-      'Digital twin simulations',
-    ],
+    description: 'Run 3D, scientific, simulation, or generative rendering pipelines.',
   },
   {
-    title: 'General GPU Computing',
-    description:
-      'Harness sovereign acceleration for simulations, finance, blockchain, and research.',
-    bullets: [
-      'Scientific simulations',
-      'Financial modeling',
-      'Blockchain processing',
-      'Protein folding and discovery',
-    ],
+    title: 'Distributed & Edge Compute',
+    description: 'Place GPU power close to where data is generated.',
   },
 ]
@@ -37,41 +21,28 @@ export function GpuUseCases() {
-      <Eyebrow>
-        Use Cases
-      </Eyebrow>
+      <Eyebrow>
+        USE CASES
+      </Eyebrow>
       <SectionHeader>
-        Acceleration for every intelligent workload.
+        Built for Intelligent Workloads
       </SectionHeader>
       <P>
-        From deep learning to immersive visualization, Mycelium GPU delivers
-        deterministic access to the power you need without the waitlists or
-        markups of centralized clouds.
+        From sovereign AI execution to real-time rendering and edge inference,
+        Mycelium GPU ensures predictable acceleration with full ownership and no centralized control.
       </P>
-      {useCases.map((useCase) => (
+      {gpuUseCases.map((useCase) => (
         <div key={useCase.title}>
           <h3>{useCase.title}</h3>
           <P>
             {useCase.description}
           </P>
-          <ul>
-            {useCase.bullets.map((bullet) => (
-              <li>
-                {bullet}
-              </li>
-            ))}
-          </ul>
         </div>
       ))}
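Assembling the two GpuUseCases.tsx hunks gives roughly the following component. The Eyebrow, SectionHeader, and P wrappers come from the file's unchanged imports; the card markup and any styling classes are assumptions, since those lines are not legible in the diff:

```tsx
import { Container } from '../../components/Container'
import { Eyebrow, SectionHeader, P } from '../../components/Texts'

const gpuUseCases = [
  {
    title: 'AI / ML Training & Inference',
    description: 'Scale model execution across sovereign GPU nodes.',
  },
  {
    title: 'Rendering & Visualization',
    description: 'Run 3D, scientific, simulation, or generative rendering pipelines.',
  },
  {
    title: 'Distributed & Edge Compute',
    description: 'Place GPU power close to where data is generated.',
  },
]

export function GpuUseCases() {
  return (
    <Container>
      <Eyebrow>USE CASES</Eyebrow>
      <SectionHeader>Built for Intelligent Workloads</SectionHeader>
      <P>
        From sovereign AI execution to real-time rendering and edge inference,
        Mycelium GPU ensures predictable acceleration with full ownership and no
        centralized control.
      </P>
      {/* Card markup is assumed; only the mapped fields are visible in the diff. */}
      {gpuUseCases.map((useCase) => (
        <div key={useCase.title}>
          <h3>{useCase.title}</h3>
          <P>{useCase.description}</P>
        </div>
      ))}
    </Container>
  )
}
```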