- JS-Torch is a Deep Learning JavaScript library built from scratch, designed to closely follow PyTorch's syntax.
- It contains a fully functional Tensor object that tracks gradients, a collection of Deep Learning Layers and functions, and an Automatic Differentiation engine.
- Feel free to try out a Web Demo!
Implemented Tensor Operations:
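For instance, here is a minimal sketch that composes only operations appearing in the examples further down (`randn`, `tensor`, `matmul`, and `add`):

```javascript
const torch = require("js-pytorch");

// Build two random tensors and combine them:
const a = torch.randn([2, 3]);
const m = torch.randn([3, 4]);
const bias = torch.tensor([0.1, 0.2, 0.3, 0.4]);

let out = torch.matmul(a, m); // shape [2, 4]
out = torch.add(out, bias);   // bias is broadcast over the rows
```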
Implemented Deep Learning Layers:
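As an illustration, here is a minimal sketch using `nn.Linear`, one of the layers from the Transformer example below:

```javascript
const torch = require("js-pytorch");
const nn = torch.nn;

// A fully-connected layer mapping 10 input features to 5 outputs:
const layer = new nn.Linear(10, 5);
const x = torch.randn([1, 10]);
const y = layer.forward(x); // shape [1, 5]
```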
- `assets/`: Folder to store images and the Demo.
- `assets/demo/`: JS-Torch's Web Demo.
- `src/`: Framework with JavaScript files.
- `src/tensor.js`: File with the `Tensor` class and all of the tensor operations.
- `src/utils.js`: File with operations and helper functions.
- `src/layers.js`: Submodule of the framework. Contains full layers.
- `src/optim.js`: Submodule of the framework. Contains the Adam Optimizer.
- `tests/`: Folder with unit tests. Contains `test.js`.
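Putting the layout together (a sketch; it assumes the layers and optimizer submodules are exposed as `torch.nn` and `torch.optim`, the names used in the examples below):

```javascript
const torch = require("js-pytorch"); // Tensor class and tensor operations
const nn = torch.nn;                 // full layers (src/layers.js)
const optim = torch.optim;           // Adam Optimizer (src/optim.js)

console.log(typeof torch.randn); // "function"
console.log(typeof nn.Linear);   // "function"
console.log(typeof optim.Adam);  // "function"
```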
A simple Autograd example:

```javascript
const torch = require("js-pytorch");

// Instantiate Tensors:
let x = torch.randn([8, 4, 5]);
let w = torch.randn([8, 5, 4], true); // requires_grad = true
let b = torch.tensor([0.2, 0.5, 0.1, 0.0], true); // requires_grad = true

// Make calculations:
let out = torch.matmul(x, w);
out = torch.add(out, b);

// Compute gradients on whole graph:
out.backward();

// Get gradients from specific Tensors:
console.log(w.grad);
console.log(b.grad);
```

A more complex Autograd example, training a full Transformer:

```javascript
const torch = require("js-pytorch");
const nn = torch.nn;
const optim = torch.optim; // Adam Optimizer (src/optim.js)
class Transformer extends nn.Module {
  constructor(vocab_size, hidden_size, n_timesteps, n_heads, p) {
    super();
    // Instantiate Transformer's Layers:
    this.embed = new nn.Embedding(vocab_size, hidden_size);
    this.pos_embed = new nn.PositionalEmbedding(n_timesteps, hidden_size);
    this.b1 = new nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, p); // dropout_p = p
    this.b2 = new nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, p); // dropout_p = p
    this.ln = new nn.LayerNorm(hidden_size);
    this.linear = new nn.Linear(hidden_size, vocab_size);
  }

  forward(x) {
    let z;
    z = torch.add(this.embed.forward(x), this.pos_embed.forward(x));
    z = this.b1.forward(z);
    z = this.b2.forward(z);
    z = this.ln.forward(z);
    z = this.linear.forward(z);
    return z;
  }
}

// Instantiate your custom nn.Module (hyperparameter values are examples):
const vocab_size = 52;
const hidden_size = 32;
const n_timesteps = 16;
const n_heads = 4;
const dropout_p = 0.2;
const batch_size = 8;
const model = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p);

// Define loss function and optimizer:
const loss_func = new nn.CrossEntropyLoss();
const optimizer = new optim.Adam(model.parameters(), 5e-3, 0); // lr = 5e-3, reg = 0

// Instantiate sample input and output:
let x = torch.randint(0, vocab_size, [batch_size, n_timesteps, 1]);
let y = torch.randint(0, vocab_size, [batch_size, n_timesteps]);
let loss;

// Training Loop:
for (let i = 0; i < 40; i++) {
  // Forward pass through the Transformer:
  let z = model.forward(x);

  // Get loss:
  loss = loss_func.forward(z, y);

  // Backpropagate the loss using torch.tensor's backward() method:
  loss.backward();

  // Update the weights:
  optimizer.step();

  // Reset the gradients to zero after each training step:
  optimizer.zero_grad();
}
```

Note: You can install the package locally with:

```bash
npm install js-pytorch
```
- The models implemented in the unit tests all converged to near-zero losses.
- This package is not yet as optimized as PyTorch, but I tried to make it more interpretable. Efficiency improvements are incoming!
- Hope you enjoy!