On a recent winter weekend, I was snowed in at my house. To use the time productively, I dusted off some old JavaScript code I had written. The code implements a complete neural network classifier from scratch. The goal of the demo multi-class classifier is to predict the size of the company (small, medium, large) that a student works for after graduation, based on the student's sex (M, F), number of science units taken, college major (finance, geology, history), and parents' income.
The data is synthetic. It looks like:
-1  0.39  0 0 1  0.3570  0 1 0
 1  0.51  1 0 0  0.8610  0 1 0
-1  0.19  0 0 1  0.6550  1 0 0
 1  0.24  0 1 0  0.5700  0 0 1
 1  0.22  1 0 0  0.8970  0 1 0
. . .
The first column is sex (M = -1, F = 1). The second column is the number of science units taken, normalized by dividing by 100. The next three columns are the college major, one-hot encoded (finance = 1 0 0, geology = 0 1 0, history = 0 0 1). The sixth column is parents' income, normalized by dividing by 100,000. The last three columns are the company size to predict (small = 1 0 0, medium = 0 1 0, large = 0 0 1).
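For example, the first line of data above corresponds to a male student who took 39 science units, majored in history, whose parents' income is $35,700, and who went to work for a medium-size company. Here is a minimal sketch of the encoding scheme. The encodeStudent() function is hypothetical, just to illustrate the idea; it is not part of the demo.

// hypothetical helper to illustrate the encoding (not part of the demo)
function encodeStudent(sex, units, major, income, size) {
  let s = (sex === "male") ? -1 : 1;   // M = -1, F = 1
  let u = units / 100;                 // normalize science units
  let majors = { "finance": [1,0,0], "geology": [0,1,0], "history": [0,0,1] };
  let sizes  = { "small": [1,0,0], "medium": [0,1,0], "large": [0,0,1] };
  let inc = income / 100000;           // normalize parents' income
  return [s, u].concat(majors[major], [inc], sizes[size]);
}

// encodeStudent("male", 39, "history", 35700.00, "medium")
// returns [-1, 0.39, 0, 0, 1, 0.357, 0, 1, 0]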
The code is complex and involves every aspect of neural networks: network architecture, input-output, activation functions, mean squared error/loss, gradient descent training, softmax, classification accuracy, saving model weights and biases, and many other sub-topics. But the point is that neural networks aren’t magic — they’re just ideas and code, even if the ideas and code are complicated.
My demo runs in a shell using node.js. The demo code could be adapted to run in a browser by replacing the console.log() statements with code that writes to an HTML element on a Web page.
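A minimal sketch of that idea, assuming a page that has an element with id "output" (this is not part of the demo):

// minimal browser sketch; assumes an HTML element with id="output"
function log(msg) {
  let elt = document.getElementById("output");
  elt.innerText += msg + "\n";
}
// then replace each console.log(msg) call with log(msg)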
A mildly annoying aspect of node.js file IO is that there is no easy way to read a text file line by line synchronously. At first I thought I must be completely missing something, but in node.js you just can't read a file line-by-line without working at a very low level and buffering characters or bytes. Ugh.
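One workaround is to read the entire file synchronously with readFileSync() and then split the contents into lines. A minimal sketch (the file path is just for illustration):

// read the whole file, then split into lines
// (there is no built-in synchronous line-by-line read)
let FS = require("fs");
let all = FS.readFileSync(".\\Data\\students_train.txt", "utf8");
let lines = all.trim().split(/\r?\n/);  // one data item per line
for (let i = 0; i < lines.length; ++i) {
  let tokens = lines[i].split("\t");    // file is tab-delimited
  // convert with parseFloat(tokens[j]) as needed . . .
}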
Here are a few representative code snippets. The complete demo code is listed below.
. . .

class NeuralNet {
  constructor(numInput, numHidden, numOutput, seed) {
    this.rnd = new U.Erratic(seed);
    this.ni = numInput;
    . . .
    this.iNodes = U.vecMake(this.ni, 0.0);
    this.hNodes = U.vecMake(this.nh, 0.0);
    . . .
    this.initWeights();
  }
  . . .
}

. . .

// compute output node signals
for (let k = 0; k < this.no; ++k) {
  let derivative = (1 - this.oNodes[k]) * this.oNodes[k];  // softmax
  oSignals[k] = derivative * (this.oNodes[k] - Y[k]);      // E = (t-o)^2
}

. . .

// compute hidden-to-output weight gradients using output signals
for (let j = 0; j < this.nh; ++j) {
  for (let k = 0; k < this.no; ++k) {
    hoGrads[j][k] = oSignals[k] * this.hNodes[j];
  }
}

. . .
Good fun!
There are several roller coaster model kits you can buy (CoasterDynamix, Lego, Thames & Kosmos, and so on). But Adam Throgmorton has created beautiful working model roller coasters from scratch, which are more interesting and beautiful than any kit model.
The complete demo code is below. It's very long.
// students_nn.js
// ES6, node.js
// predict company size (S,M,L) from sex, science units, major, parents' income

let U = require("../Utilities/utilities_lib.js");
let FS = require("fs");

// =============================================================================

class NeuralNet {
  constructor(numInput, numHidden, numOutput, seed) {
    this.rnd = new U.Erratic(seed);

    this.ni = numInput;
    this.nh = numHidden;
    this.no = numOutput;

    this.iNodes = U.vecMake(this.ni, 0.0);
    this.hNodes = U.vecMake(this.nh, 0.0);
    this.oNodes = U.vecMake(this.no, 0.0);

    this.ihWeights = U.matMake(this.ni, this.nh, 0.0);
    this.hoWeights = U.matMake(this.nh, this.no, 0.0);

    this.hBiases = U.vecMake(this.nh, 0.0);
    this.oBiases = U.vecMake(this.no, 0.0);

    this.initWeights();
  }

  initWeights() {
    let lo = -0.01;
    let hi = 0.01;
    for (let i = 0; i < this.ni; ++i) {
      for (let j = 0; j < this.nh; ++j) {
        this.ihWeights[i][j] = (hi - lo) * this.rnd.next() + lo;
      }
    }
    for (let j = 0; j < this.nh; ++j) {
      for (let k = 0; k < this.no; ++k) {
        this.hoWeights[j][k] = (hi - lo) * this.rnd.next() + lo;
      }
    }
  }

  eval(X) {
    let hSums = U.vecMake(this.nh, 0.0);
    let oSums = U.vecMake(this.no, 0.0);

    this.iNodes = X;

    for (let j = 0; j < this.nh; ++j) {
      for (let i = 0; i < this.ni; ++i) {
        hSums[j] += this.iNodes[i] * this.ihWeights[i][j];
      }
      hSums[j] += this.hBiases[j];
      this.hNodes[j] = U.hyperTan(hSums[j]);
    }

    for (let k = 0; k < this.no; ++k) {
      for (let j = 0; j < this.nh; ++j) {
        oSums[k] += this.hNodes[j] * this.hoWeights[j][k];
      }
      oSums[k] += this.oBiases[k];
    }

    this.oNodes = U.softmax(oSums);

    let result = [];
    for (let k = 0; k < this.no; ++k) {
      result[k] = this.oNodes[k];
    }
    return result;
  } // eval()

  setWeights(wts) {
    // order: ihWts, hBiases, hoWts, oBiases
    let p = 0;
    for (let i = 0; i < this.ni; ++i) {
      for (let j = 0; j < this.nh; ++j) {
        this.ihWeights[i][j] = wts[p++];
      }
    }
    for (let j = 0; j < this.nh; ++j) {
      this.hBiases[j] = wts[p++];
    }
    for (let j = 0; j < this.nh; ++j) {
      for (let k = 0; k < this.no; ++k) {
        this.hoWeights[j][k] = wts[p++];
      }
    }
    for (let k = 0; k < this.no; ++k) {
      this.oBiases[k] = wts[p++];
    }
  } // setWeights()

  getWeights() {
    // order: ihWts, hBiases, hoWts, oBiases
    let numWts = (this.ni * this.nh) + this.nh + (this.nh * this.no) + this.no;
    let result = U.vecMake(numWts, 0.0);
    let p = 0;
    for (let i = 0; i < this.ni; ++i) {
      for (let j = 0; j < this.nh; ++j) {
        result[p++] = this.ihWeights[i][j];
      }
    }
    for (let j = 0; j < this.nh; ++j) {
      result[p++] = this.hBiases[j];
    }
    for (let j = 0; j < this.nh; ++j) {
      for (let k = 0; k < this.no; ++k) {
        result[p++] = this.hoWeights[j][k];
      }
    }
    for (let k = 0; k < this.no; ++k) {
      result[p++] = this.oBiases[k];
    }
    return result;
  } // getWeights()

  shuffle(v) {
    // Fisher-Yates
    let n = v.length;
    for (let i = 0; i < n; ++i) {
      let r = this.rnd.nextInt(i, n);
      let tmp = v[r];
      v[r] = v[i];
      v[i] = tmp;
    }
  }

  train(trainX, trainY, lrnRate, maxEpochs) {
    let hoGrads = U.matMake(this.nh, this.no, 0.0);
    let obGrads = U.vecMake(this.no, 0.0);
    let ihGrads = U.matMake(this.ni, this.nh, 0.0);
    let hbGrads = U.vecMake(this.nh, 0.0);

    let oSignals = U.vecMake(this.no, 0.0);
    let hSignals = U.vecMake(this.nh, 0.0);

    let n = trainX.length;       // 160
    let indices = U.arange(n);   // [0,1,..,159]
    let freq = Math.trunc(maxEpochs / 10);

    for (let epoch = 0; epoch < maxEpochs; ++epoch) {
      this.shuffle(indices);
      for (let ii = 0; ii < n; ++ii) {  // each item
        let idx = indices[ii];
        let X = trainX[idx];
        let Y = trainY[idx];
        this.eval(X);  // output stored in this.oNodes

        // compute output node signals
        for (let k = 0; k < this.no; ++k) {
          let derivative = (1 - this.oNodes[k]) * this.oNodes[k];  // softmax
          oSignals[k] = derivative * (this.oNodes[k] - Y[k]);      // E=(t-o)^2
        }

        // compute hidden-to-output weight gradients using output signals
        for (let j = 0; j < this.nh; ++j) {
          for (let k = 0; k < this.no; ++k) {
            hoGrads[j][k] = oSignals[k] * this.hNodes[j];
          }
        }

        // compute output node bias gradients using output signals
        for (let k = 0; k < this.no; ++k) {
          obGrads[k] = oSignals[k] * 1.0;  // 1.0 dummy input can be dropped
        }

        // compute hidden node signals
        for (let j = 0; j < this.nh; ++j) {
          let sum = 0.0;
          for (let k = 0; k < this.no; ++k) {
            sum += oSignals[k] * this.hoWeights[j][k];
          }
          let derivative = (1 - this.hNodes[j]) * (1 + this.hNodes[j]);  // tanh
          hSignals[j] = derivative * sum;
        }

        // compute input-to-hidden weight gradients using hidden signals
        for (let i = 0; i < this.ni; ++i) {
          for (let j = 0; j < this.nh; ++j) {
            ihGrads[i][j] = hSignals[j] * this.iNodes[i];
          }
        }

        // compute hidden node bias gradients using hidden signals
        for (let j = 0; j < this.nh; ++j) {
          hbGrads[j] = hSignals[j] * 1.0;  // 1.0 dummy input can be dropped
        }

        // update input-to-hidden weights
        for (let i = 0; i < this.ni; ++i) {
          for (let j = 0; j < this.nh; ++j) {
            let delta = -1.0 * lrnRate * ihGrads[i][j];
            this.ihWeights[i][j] += delta;
          }
        }

        // update hidden node biases
        for (let j = 0; j < this.nh; ++j) {
          let delta = -1.0 * lrnRate * hbGrads[j];
          this.hBiases[j] += delta;
        }

        // update hidden-to-output weights
        for (let j = 0; j < this.nh; ++j) {
          for (let k = 0; k < this.no; ++k) {
            let delta = -1.0 * lrnRate * hoGrads[j][k];
            this.hoWeights[j][k] += delta;
          }
        }

        // update output node biases
        for (let k = 0; k < this.no; ++k) {
          let delta = -1.0 * lrnRate * obGrads[k];
          this.oBiases[k] += delta;
        }
      } // ii

      if (epoch % freq == 0) {
        let mse = this.meanSqErr(trainX, trainY).toFixed(4);
        let acc = this.accuracy(trainX, trainY).toFixed(4);

        let s1 = "epoch: " + epoch.toString().padStart(6, ' ');
        let s2 = " MSE = " + mse.toString().padStart(8, ' ');
        let s3 = " acc = " + acc.toString();

        console.log(s1 + s2 + s3);
      }
    } // epoch
  } // train()

  meanCrossEntErr(dataX, dataY) {
    let sumCEE = 0.0;  // sum of the cross entropy errors
    for (let i = 0; i < dataX.length; ++i) {  // each data item
      let X = dataX[i];
      let Y = dataY[i];          // target output like (0, 1, 0)
      let oupt = this.eval(X);   // computed like (0.23, 0.66, 0.11)
      let idx = U.argmax(Y);     // find location of the 1 in target
      sumCEE += Math.log(oupt[idx]);
    }
    sumCEE *= -1;
    return sumCEE / dataX.length;
  }

  meanSqErr(dataX, dataY) {
    let sumSE = 0.0;
    for (let i = 0; i < dataX.length; ++i) {  // each data item
      let X = dataX[i];
      let Y = dataY[i];          // target output like (0, 1, 0)
      let oupt = this.eval(X);   // computed like (0.23, 0.66, 0.11)
      for (let k = 0; k < this.no; ++k) {
        let err = Y[k] - oupt[k];  // target - computed
        sumSE += err * err;
      }
    }
    return sumSE / dataX.length;
  }

  accuracy(dataX, dataY) {
    let nc = 0; let nw = 0;
    for (let i = 0; i < dataX.length; ++i) {  // each data item
      let X = dataX[i];
      let Y = dataY[i];          // target output like (0, 1, 0)
      let oupt = this.eval(X);   // computed like (0.23, 0.66, 0.11)
      let computedIdx = U.argmax(oupt);
      let targetIdx = U.argmax(Y);
      if (computedIdx == targetIdx) {
        ++nc;
      }
      else {
        ++nw;
      }
    }
    return nc / (nc + nw);
  }

  saveWeights(fn) {
    let wts = this.getWeights();
    let n = wts.length;
    let s = "";
    for (let i = 0; i < n - 1; ++i) {
      s += wts[i].toString() + ",";
    }
    s += wts[n-1];
    FS.writeFileSync(fn, s);
  }

  loadWeights(fn) {
    let n = (this.ni * this.nh) + this.nh + (this.nh * this.no) + this.no;
    let wts = U.vecMake(n, 0.0);
    let all = FS.readFileSync(fn, "utf8");
    let strVals = all.split(",");
    let nn = strVals.length;
    if (n != nn) {
      throw("Size error in NeuralNet.loadWeights()");
    }
    for (let i = 0; i < n; ++i) {
      wts[i] = parseFloat(strVals[i]);
    }
    this.setWeights(wts);
  }

} // NeuralNet

// =============================================================================

function main() {
  process.stdout.write("\x1b[0m");               // reset
  process.stdout.write("\x1b[1m" + "\x1b[37m");  // bright white

  console.log("\nBegin Students Data demo with JavaScript ");
  console.log("Goal is to predict size of company after graduation ");

  // 1. load data
  // raw data looks like:   male   29   finance   38400.00   medium
  // norm data looks like:  -1   0.29   1 0 0   0.3840   0 1 0
  let trainX = U.loadTxt(".\\Data\\students_train.txt", "\t", [0,1,2,3,4,5]);
  let trainY = U.loadTxt(".\\Data\\students_train.txt", "\t", [6,7,8]);
  let testX = U.loadTxt(".\\Data\\students_test.txt", "\t", [0,1,2,3,4,5]);
  let testY = U.loadTxt(".\\Data\\students_test.txt", "\t", [6,7,8]);

  // 2. create network
  console.log("\nCreating a 6-40-3 tanh, softmax NN for Students dataset ");
  let seed = 1;
  let nn = new NeuralNet(6, 40, 3, seed);

  // 3. train network
  let lrnRate = 0.005;
  let maxEpochs = 4000;
  console.log("\nStarting training with learning rate = " + lrnRate.toString() + " ");
  nn.train(trainX, trainY, lrnRate, maxEpochs);
  console.log("Training complete ");

  // 4. evaluate model
  let trainAcc = nn.accuracy(trainX, trainY);
  let testAcc = nn.accuracy(testX, testY);
  console.log("\nAccuracy on training data = " + trainAcc.toFixed(4).toString());
  console.log("Accuracy on test data     = " + testAcc.toFixed(4).toString());

  // 5. save trained model
  let fn = ".\\Models\\students_wts.txt";
  console.log("\nSaving model weights and biases to: ");
  console.log(fn);
  nn.saveWeights(fn);

  // 6. use trained model
  let unknownRaw = ["male", 46, "history", 66400.00];  // sex, science units, major, parents' income
  let unknownNorm = [-1, 0.46, 0, 0, 1, 0.6640];
  let predicted = nn.eval(unknownNorm);
  console.log("\nRaw features of student to predict: ");
  console.log(unknownRaw);
  console.log("\nNormalized features of student to predict: ");
  U.vecShow(unknownNorm, 4, 12);
  console.log("\nPredicted pseudo-probabilities: ");
  U.vecShow(predicted, 4, 12);

  let companySizes = ["small", "medium", "large"];
  let predIdx = U.argmax(predicted);
  let predSize = companySizes[predIdx];
  console.log("Predicted company size = " + predSize);

  process.stdout.write("\x1b[0m");  // reset
  console.log("\nEnd demo");
} // main()

main();
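The demo relies on a small utilities_lib.js module (vecMake, matMake, arange, argmax, hyperTan, softmax, vecShow, loadTxt, and the Erratic random number generator) that isn't listed here. To give a sense of what's in it, here is a minimal sketch of a few of those functions. These are plausible implementations, not the actual listing, and the Erratic generator in particular is just a stand-in.

// sketch of a few utilities_lib.js functions (assumptions, not the actual listing)

function vecMake(n, val) {
  let result = [];
  for (let i = 0; i < n; ++i) { result[i] = val; }
  return result;
}

function matMake(rows, cols, val) {
  let result = [];
  for (let i = 0; i < rows; ++i) { result[i] = vecMake(cols, val); }
  return result;
}

function arange(n) {  // [0, 1, .. n-1]
  let result = [];
  for (let i = 0; i < n; ++i) { result[i] = i; }
  return result;
}

function argmax(vec) {  // index of largest value
  let idx = 0;
  for (let i = 1; i < vec.length; ++i) {
    if (vec[i] > vec[idx]) { idx = i; }
  }
  return idx;
}

function hyperTan(x) {  // tanh, clipped for extreme arguments
  if (x < -20.0) { return -1.0; }
  else if (x > 20.0) { return 1.0; }
  else { return Math.tanh(x); }
}

function softmax(vec) {  // subtract max for numerical stability
  let m = Math.max(...vec);
  let exps = vec.map(v => Math.exp(v - m));
  let sum = exps.reduce((a, b) => a + b, 0.0);
  return exps.map(e => e / sum);
}

class Erratic {  // stand-in lightweight random number generator
  constructor(seed) { this.seed = seed + 0.5; }
  next() {  // pseudo-random value in [0.0, 1.0)
    let x = Math.sin(this.seed) * 1000;
    let result = x - Math.floor(x);
    this.seed = result;
    return result;
  }
  nextInt(lo, hi) {  // integer in [lo, hi)
    return Math.trunc((hi - lo) * this.next() + lo);
  }
}

To run the demo, launch node students_nn.js from a shell with the Data and Models directories in place relative to the program.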
File students_train.txt. The original is tab-delimited; spaces are used in the listing here.
-1 0.39 0 0 1 0.3570 0 1 0 1 0.51 1 0 0 0.8610 0 1 0 1 0.52 0 0 1 0.6530 0 1 0 -1 0.36 0 1 0 0.7300 0 1 0 1 0.24 0 1 0 0.5700 0 0 1 1 0.27 1 0 0 0.7610 0 0 1 -1 0.19 0 0 1 0.6550 1 0 0 1 0.22 1 0 0 0.8970 0 1 0 -1 0.39 0 0 1 0.8230 0 0 1 1 0.63 1 0 0 0.2120 1 0 0 1 0.34 0 1 0 0.3780 0 1 0 -1 0.22 0 1 0 0.8500 1 0 0 1 0.35 0 0 1 0.8660 0 0 1 -1 0.33 1 0 0 0.8330 0 1 0 1 0.45 1 0 0 0.7680 0 1 0 1 0.42 1 0 0 0.4130 0 1 0 -1 0.33 1 0 0 0.2980 0 1 0 1 0.25 0 0 1 0.6010 0 1 0 -1 0.31 1 0 0 0.4370 1 0 0 1 0.27 0 1 0 0.5750 0 0 1 1 0.48 0 1 0 0.7570 0 1 0 -1 0.64 1 0 0 0.5740 0 0 1 1 0.61 1 0 0 0.2970 1 0 0 1 0.54 0 0 1 0.4170 1 0 0 1 0.29 0 1 0 0.8570 1 0 0 1 0.51 0 0 1 0.8000 0 1 0 1 0.55 0 0 1 0.2160 1 0 0 1 0.42 0 1 0 0.5610 1 0 0 1 0.22 0 1 0 0.2760 0 0 1 1 0.68 1 0 0 0.6810 1 0 0 -1 0.61 0 1 0 0.3940 0 0 1 -1 0.34 0 0 1 0.2550 0 1 0 -1 0.25 0 0 1 0.6230 1 0 0 -1 0.31 1 0 0 0.4300 0 1 0 1 0.43 0 0 1 0.8510 0 1 0 1 0.58 1 0 0 0.8850 0 0 1 -1 0.55 1 0 0 0.7180 0 0 1 -1 0.43 1 0 0 0.7370 0 1 0 -1 0.43 0 0 1 0.8130 0 1 0 -1 0.21 0 1 0 0.7700 1 0 0 1 0.55 0 0 1 0.7330 1 0 0 1 0.64 1 0 0 0.5460 1 0 0 -1 0.41 0 1 0 0.5250 0 1 0 1 0.64 0 0 1 0.4540 1 0 0 -1 0.56 0 0 1 0.4040 0 0 1 1 0.31 0 0 1 0.2050 0 1 0 -1 0.65 0 0 1 0.6860 0 0 1 1 0.55 0 0 1 0.3180 1 0 0 -1 0.25 0 1 0 0.7050 1 0 0 1 0.46 0 0 1 0.8180 0 1 0 -1 0.36 0 1 0 0.5260 1 0 0 1 0.52 1 0 0 0.5820 0 1 0 1 0.61 0 0 1 0.6940 1 0 0 1 0.57 0 0 1 0.5740 1 0 0 -1 0.46 1 0 0 0.2240 0 1 0 -1 0.62 0 1 0 0.2460 0 0 1 1 0.55 0 0 1 0.8530 1 0 0 -1 0.22 0 0 1 0.6800 0 1 0 -1 0.51 0 1 0 0.3750 1 0 0 -1 0.32 1 0 0 0.4800 0 1 0 -1 0.21 0 0 1 0.6710 1 0 0 1 0.44 1 0 0 0.6000 0 1 0 1 0.46 1 0 0 0.5290 0 1 0 1 0.62 1 0 0 0.2320 1 0 0 1 0.57 1 0 0 0.6740 1 0 0 -1 0.67 0 0 1 0.3930 0 0 1 1 0.29 0 1 0 0.4860 0 0 1 1 0.53 0 1 0 0.6190 1 0 0 -1 0.44 0 1 0 0.8680 0 1 0 1 0.46 1 0 0 0.2300 0 1 0 -1 0.21 1 0 0 0.3250 0 1 0 -1 0.38 0 1 0 0.7450 0 1 0 1 0.52 1 0 0 0.5980 0 1 0 1 0.33 1 0 0 0.7070 0 1 0 -1 0.33 1 0 0 0.7810 0 1 0 1 0.26 1 0 0 0.3690 1 0 0 1 0.58 0 1 0 0.2750 1 0 0 1 0.43 0 0 1 0.8880 0 1 0 -1 0.46 0 1 0 0.8900 1 0 0 1 0.61 0 1 0 0.5990 1 0 0 -1 0.42 0 1 0 0.6630 0 1 0 -1 0.56 0 0 1 0.2700 0 0 1 -1 0.62 1 0 0 0.3720 0 0 1 -1 0.51 0 1 0 0.2990 0 1 0 1 0.47 0 0 1 0.8090 0 1 0 -1 0.67 1 0 0 0.3080 0 0 1 -1 0.41 0 0 1 0.6850 0 1 0 1 0.42 1 0 0 0.5000 0 1 0 1 0.64 0 1 0 0.8040 1 0 0 -1 0.47 0 1 0 0.2580 0 0 1 1 0.45 1 0 0 0.8300 0 1 0 -1 0.25 0 0 1 0.6980 1 0 0 1 0.38 0 1 0 0.8280 1 0 0 1 0.55 0 0 1 0.2040 0 1 0 -1 0.44 0 1 0 0.7880 0 1 0 1 0.33 0 1 0 0.6400 0 1 0 1 0.34 0 0 1 0.6130 0 1 0 1 0.27 1 0 0 0.7360 0 0 1 1 0.32 1 0 0 0.2340 0 1 0 1 0.42 0 0 1 0.5350 0 1 0 -1 0.24 0 0 1 0.5490 1 0 0 1 0.42 1 0 0 0.8570 0 1 0 1 0.25 0 0 1 0.2920 0 0 1 1 0.51 1 0 0 0.8000 0 1 0 -1 0.55 1 0 0 0.6180 0 0 1 1 0.44 0 1 0 0.4510 0 0 1 -1 0.18 0 1 0 0.4430 1 0 0 -1 0.67 1 0 0 0.4860 0 0 1 1 0.45 0 0 1 0.8700 0 1 0 1 0.48 0 1 0 0.4830 0 1 0 -1 0.25 1 0 0 0.2170 0 1 0 -1 0.67 0 1 0 0.7670 0 1 0 1 0.37 0 0 1 0.2670 0 1 0 -1 0.32 0 1 0 0.7700 0 1 0 1 0.48 0 1 0 0.2260 0 1 0 -1 0.66 0 0 1 0.2990 0 0 1 1 0.61 0 1 0 0.3030 1 0 0 -1 0.58 0 0 1 0.6770 0 1 0 1 0.19 0 1 0 0.3010 0 0 1 1 0.38 0 0 1 0.6840 0 1 0 -1 0.27 0 1 0 0.6120 0 1 0 1 0.42 0 1 0 0.5140 0 1 0 1 0.61 0 1 0 0.7200 1 0 0 -1 0.27 0 0 1 0.8620 1 0 0 1 0.29 1 0 0 0.6460 1 0 0 -1 0.43 0 1 0 0.5990 0 1 0 1 0.48 0 1 0 0.8250 0 1 0 1 0.27 0 0 1 0.6140 0 0 1 -1 0.44 0 1 0 0.8910 1 0 0 1 0.23 1 0 0 0.4550 0 0 1 -1 0.36 1 0 0 0.8190 0 0 1 1 0.64 0 0 1 0.3130 1 0 0 1 0.29 0 0 1 0.7660 0 0 1 -1 0.33 0 1 0 0.2010 0 1 0 -1 0.66 1 0 
0 0.2350 0 0 1 -1 0.21 0 0 1 0.6740 1 0 0 1 0.27 0 1 0 0.6540 0 0 1 1 0.29 0 1 0 0.6900 0 0 1 -1 0.31 0 1 0 0.8330 0 1 0 1 0.36 0 0 1 0.8130 0 1 0 1 0.49 1 0 0 0.8170 0 1 0 -1 0.28 0 1 0 0.2110 1 0 0 -1 0.43 0 0 1 0.7810 0 1 0 -1 0.46 1 0 0 0.7460 0 1 0 1 0.57 0 1 0 0.2330 1 0 0 -1 0.52 0 0 1 0.8910 0 1 0 -1 0.31 0 0 1 0.8700 0 1 0 -1 0.55 0 1 0 0.5860 0 0 1 1 0.51 0 1 0 0.5340 0 1 0 1 0.48 1 0 0 0.4510 0 1 0 -1 0.22 0 0 1 0.2970 1 0 0 1 0.59 0 0 1 0.5920 1 0 0 1 0.34 0 1 0 0.8240 0 0 1 -1 0.64 0 1 0 0.6300 0 0 1 1 0.29 0 0 1 0.8700 0 0 1 -1 0.34 1 0 0 0.7480 0 1 0 -1 0.61 0 1 0 0.2100 0 0 1 1 0.64 0 0 1 0.4450 1 0 0 -1 0.29 0 1 0 0.6360 1 0 0 1 0.63 1 0 0 0.4700 1 0 0
File students_test.txt
-1 0.29 1 0 0 0.6690 1 0 0 -1 0.51 0 1 0 0.4330 0 1 0 -1 0.24 0 0 1 0.8230 1 0 0 1 0.48 1 0 0 0.2080 0 1 0 1 0.18 0 1 0 0.3860 1 0 0 1 0.18 0 1 0 0.5110 0 0 1 1 0.33 1 0 0 0.3470 0 0 1 -1 0.22 0 0 1 0.2600 1 0 0 1 0.29 0 0 1 0.4920 0 0 1 -1 0.44 0 0 1 0.8490 1 0 0 -1 0.65 0 0 1 0.8540 1 0 0 -1 0.56 0 1 0 0.8940 0 0 1 -1 0.52 0 0 1 0.4890 0 1 0 -1 0.29 1 0 0 0.4560 1 0 0 -1 0.47 1 0 0 0.7480 0 1 0 1 0.68 0 1 0 0.6010 0 0 1 1 0.31 0 0 1 0.2910 0 1 0 1 0.61 1 0 0 0.8320 0 0 1 1 0.19 1 0 0 0.4750 0 0 1 1 0.38 0 0 1 0.8060 0 1 0 -1 0.26 0 1 0 0.7980 1 0 0 1 0.61 1 0 0 0.7250 1 0 0 1 0.41 0 1 0 0.2430 0 1 0 -1 0.49 0 1 0 0.3900 0 1 0 1 0.56 0 1 0 0.2680 1 0 0 -1 0.48 1 0 0 0.8220 0 1 0 1 0.52 0 1 0 0.5930 0 0 1 -1 0.18 0 1 0 0.4260 1 0 0 -1 0.56 0 0 1 0.6380 0 0 1 -1 0.52 1 0 0 0.8210 0 1 0 -1 0.18 1 0 0 0.2110 0 1 0 -1 0.58 0 1 0 0.2430 0 0 1 -1 0.39 1 0 0 0.7430 0 1 0 -1 0.46 0 1 0 0.5290 0 1 0 -1 0.41 1 0 0 0.2320 0 1 0 -1 0.61 0 1 0 0.2080 0 0 1 1 0.36 1 0 0 0.5360 0 0 1 1 0.44 0 1 0 0.7420 0 1 0 1 0.28 0 1 0 0.8020 0 0 1 1 0.54 0 0 1 0.4730 1 0 0