Building Neural Networks
import { GradTensor, parameter } from "deepbox/ndarray";
import { Linear, ReLU, Sequential } from "deepbox/nn";
import { Adam } from "deepbox/optim";
// Create a 2-layer network: 2 inputs -> 16 hidden -> 1 output
// Two-layer MLP: 2 inputs -> 16 hidden units (ReLU) -> 1 output.
const model = new Sequential(
  new Linear(2, 16),
  new ReLU(),
  new Linear(16, 1),
);

// Quick sanity check: count the trainable parameter tensors.
const paramCount = [...model.parameters()].length;
console.log(`Model parameters: ${paramCount}`);
// Training data: y = x0 + 2*x1
// Training inputs for the linear target y = x0 + 2 * x1.
const X = parameter([
  [1, 0], [0, 1], [1, 1], [2, 1],
  [1, 2], [3, 1], [2, 2], [0, 3],
]);
// Matching targets, one row per input sample.
const yTargets = parameter([[1], [2], [3], [4], [5], [5], [6], [6]]);
console.log("Training data prepared");
// Adam optimizer over all trainable parameters.
// NOTE: the GradTensor import was moved into the top-of-file import block —
// ES module imports are hoisted, so a mid-file import is misleading.
const optimizer = new Adam(model.parameters(), { lr: 0.01 });

const EPOCHS = 200;
console.log(`Training for ${EPOCHS} epochs...`);
for (let epoch = 0; epoch < EPOCHS; epoch++) {
  // Forward pass through the network.
  const pred = model.forward(X);
  // Guard: the autograd calls below (sub/mul/mean/backward) need a GradTensor.
  if (!(pred instanceof GradTensor)) throw new Error("Expected GradTensor");
  // Mean-squared-error loss: mean((pred - target)^2).
  const diff = pred.sub(yTargets);
  const loss = diff.mul(diff).mean();
  // Standard update step: clear stale grads, backprop, apply the update.
  optimizer.zeroGrad();
  loss.backward();
  optimizer.step();
  // Report progress every 50 epochs.
  if (epoch % 50 === 0) {
    // Pull the scalar loss out of the underlying tensor storage.
    const lossValue = loss.tensor.data[loss.tensor.offset];
    console.log(` Epoch ${epoch}: loss = ${Number(lossValue).toFixed(6)}`);
  }
}
Expected output:
Training for 200 epochs...
Epoch 0: loss = 12.456789
Epoch 50: loss = 0.234567
Epoch 100: loss = 0.012345
Epoch 150: loss = 0.001234
// Final forward pass for inspection. The raw tensor is passed here rather
// than the parameter itself — presumably to skip gradient tracking during
// inference (TODO confirm against the deepbox API).
const finalPred = model.forward(X.tensor);

console.log("\nPredictions:");
console.log(String(finalPred));
console.log("\nTargets:");
console.log(String(yTargets.tensor));
Custom Neural Network Modules
Create reusable network architectures.

Next Steps
CNNs
Build convolutional networks for image processing
RNNs & LSTMs
Process sequential data with recurrent networks