A somewhat unusual machine learning problem scenario is one where the predictor variables are all Boolean. This is sometimes called Bernoulli classification. The most well-known example (to me anyway) of this type of problem is the House Voting dataset. I put together a demo using PyTorch.
The raw data looks like:
republican,n,y,n,y,y,y,n,n,n,y,?,y,y,y,n,y
democrat,n,y,y,n,y,y,n,n,n,n,n,n,y,y,y,y
democrat,n,y,n,y,y,y,n,n,n,n,n,n,?,y,y,y
republican,n,y,n,y,y,y,n,n,n,n,n,y,y,y,n,y
. . .
There are 435 data items, one for each of the 435 members of the U.S. House of Representatives. The first column is the member’s political party, Democrat or Republican. Each of the next 16 values corresponds to the member’s vote on a particular bill. The possible values are ‘n’ (no), ‘y’ (yes), or ‘?’ (abstain).
I removed all the data items that had one or more ‘?’ values, which left me with 232 items. I encoded democrat as 0, republican as 1, and ‘n’ as 0, ‘y’ as 1. The result looks like:
0,0,1,1,0,1,1,0,0,0,0,0,0,1,1,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
. . .
I split the data into the first 200 items for training data, and the last 32 items for test data.
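For reference, the preprocessing can be scripted in a few lines. This is a minimal sketch of my own (the raw file name house-votes-84.data and the encode() helper are assumptions, not part of the demo):

# make_votes_files.py
# drop items with any '?', encode party and votes as 0/1,
# save the first 200 items as train, the last 32 as test

def encode(tok):
  # democrat -> 0, republican -> 1, 'n' -> 0, 'y' -> 1
  return '0' if tok in ('democrat', 'n') else '1'

rows = []
with open('house-votes-84.data', 'r') as f:
  for line in f:
    toks = line.strip().split(',')
    if '?' in toks: continue  # remove items with abstentions
    rows.append(','.join(encode(t) for t in toks))

with open('votes_train.txt', 'w') as f:
  f.write('\n'.join(rows[0:200]) + '\n')    # first 200 items
with open('votes_test.txt', 'w') as f:
  f.write('\n'.join(rows[200:232]) + '\n')  # last 32 items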
My PyTorch neural network has a 16-(10-10)-1 architecture with Xavier initialization, tanh hidden activation, and logistic sigmoid output activation. For training, I used a batch size of 10, BCELoss loss function, and SGD optimization.
The trained model scored 0.9950 accuracy on the training data (199 out of 200 correct), and 0.9062 accuracy on the test data (29 out of 32 correct).
One thing that’s interesting about problems that have all Boolean predictor variables is that in addition to all the normal classification techniques, there are two specialized techniques: Winnow classification and Bernoulli Naive Bayes classification. I’ll show those techniques in detail at some point in the future, but a quick preview sketch is below.
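Here is a minimal preview sketch of both specialized techniques, assuming the two encoded data files from above and that scikit-learn is installed. The winnow_train() helper is my own bare-bones version of the classic mistake-driven Winnow algorithm, and BernoulliNB is scikit-learn’s Bernoulli Naive Bayes implementation; neither is tuned.

# winnow_bnb_preview.py
# minimal sketches, not production code
import numpy as np
from sklearn.naive_bayes import BernoulliNB

train = np.loadtxt(".\\Data\\votes_train.txt", delimiter=",",
  comments="#", dtype=np.float32)
test = np.loadtxt(".\\Data\\votes_test.txt", delimiter=",",
  comments="#", dtype=np.float32)
train_x, train_y = train[:,1:], train[:,0]
test_x, test_y = test[:,1:], test[:,0]

def winnow_train(X, y, alpha=2.0):
  # classic Winnow: predict 1 if w.x >= theta; on a mistake,
  # multiplicatively promote or demote the weights of the
  # active (x_i = 1) inputs
  n_feats = X.shape[1]
  w = np.ones(n_feats, dtype=np.float32)  # weights start at 1.0
  theta = n_feats / 2.0                   # fixed threshold
  for (xi, yi) in zip(X, y):
    pred = 1.0 if np.dot(w, xi) >= theta else 0.0
    if pred == 1.0 and yi == 0.0:
      w[xi == 1.0] /= alpha  # false positive: demote
    elif pred == 0.0 and yi == 1.0:
      w[xi == 1.0] *= alpha  # false negative: promote
  return w, theta

w, theta = winnow_train(train_x, train_y)
preds = (test_x @ w >= theta).astype(np.float32)
print("Winnow test accuracy = %0.4f" % np.mean(preds == test_y))

bnb = BernoulliNB()  # per-predictor Bernoulli likelihoods
bnb.fit(train_x, train_y)
print("BernoulliNB test accuracy = %0.4f" % bnb.score(test_x, test_y))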
Internet image searches. Left: “Democratic party”. Center: “Republican party”. Right: “Party party”.
Demo code:
# vote_pytorch.py
# House Reps Voting Dataset binary classification
# PyTorch 1.12.1-CPU  Anaconda3-2020.02  Python 3.7.6
# Windows 10/11

import numpy as np
import torch as T
device = T.device('cpu')  # apply to Tensor or Module

class VotesDataset(T.utils.data.Dataset):
  # party - 16 votes
  # 0 = democrat, 1 = republican
  # 0 = no, 1 = yes

  def __init__(self, src_file):
    all_data = np.loadtxt(src_file, usecols=range(0,17),
      delimiter=",", comments="#", dtype=np.float32)

    x_data = all_data[:,1:17]      # 16 binary predictors
    y_data = all_data[:,0]         # target 0 or 1
    y_data = y_data.reshape(-1,1)  # 2-D required

    self.x_data = T.tensor(x_data, dtype=T.float32).to(device)
    self.y_data = T.tensor(y_data, dtype=T.float32).to(device)

  def __len__(self):
    return len(self.x_data)

  def __getitem__(self, idx):
    votes = self.x_data[idx,:]  # idx row, all 16 cols
    party = self.y_data[idx,:]  # idx row, the only col
    return votes, party         # as a Tuple

# ---------------------------------------------------------

class Net(T.nn.Module):
  def __init__(self):
    super(Net, self).__init__()
    self.hid1 = T.nn.Linear(16, 10)  # 16-(10-10)-1
    self.hid2 = T.nn.Linear(10, 10)
    self.oupt = T.nn.Linear(10, 1)

    T.nn.init.xavier_uniform_(self.hid1.weight)
    T.nn.init.zeros_(self.hid1.bias)
    T.nn.init.xavier_uniform_(self.hid2.weight)
    T.nn.init.zeros_(self.hid2.bias)
    T.nn.init.xavier_uniform_(self.oupt.weight)
    T.nn.init.zeros_(self.oupt.bias)

  def forward(self, x):
    z = T.tanh(self.hid1(x))
    z = T.tanh(self.hid2(z))
    z = T.sigmoid(self.oupt(z))  # for BCELoss()
    return z

# ---------------------------------------------------------

def metrics(model, ds, thresh=0.5):
  # note: N = total number of items = TP + FP + TN + FN
  # accuracy  = (TP + TN) / N
  # precision = TP / (TP + FP)
  # recall    = TP / (TP + FN)
  # F1 = 2 / [(1 / precision) + (1 / recall)]

  tp = 0; tn = 0; fp = 0; fn = 0
  for i in range(len(ds)):
    inpts = ds[i][0]   # ds[i] is a (votes, party) Tuple
    target = ds[i][1]  # float32 [0.0] or [1.0]
    target = target.int()  # int 0 or 1
    with T.no_grad():
      p = model(inpts)  # between 0.0 and 1.0

    # FP: "falsely predicted to be positive"
    # FN: "falsely predicted to be negative"
    if target == 1 and p >= thresh:    # TP
      tp += 1
    elif target == 1 and p < thresh:   # FN
      fn += 1
    elif target == 0 and p < thresh:   # TN
      tn += 1
    elif target == 0 and p >= thresh:  # FP
      fp += 1

  N = tp + fp + tn + fn
  if N != len(ds):
    print("FATAL LOGIC ERROR in metrics()")

  accuracy = (tp + tn) / (N * 1.0)
  precision = (1.0 * tp) / (tp + fp)  # tp + fp != 0
  recall = (1.0 * tp) / (tp + fn)     # tp + fn != 0
  f1 = 2.0 / ((1.0 / precision) + (1.0 / recall))
  return (accuracy, precision, recall, f1)  # as a Tuple

# ---------------------------------------------------------

def main():
  # 0. get started
  print("\nVoting Dataset using PyTorch ")
  T.manual_seed(1)
  np.random.seed(1)

  # 1. create Dataset and DataLoader objects
  print("\nCreating Voting train and test Datasets ")

  train_file = ".\\Data\\votes_train.txt"
  test_file = ".\\Data\\votes_test.txt"

  train_ds = VotesDataset(train_file)  # 200 rows
  test_ds = VotesDataset(test_file)    # 32 rows

  bat_size = 10
  train_ldr = T.utils.data.DataLoader(train_ds,
    batch_size=bat_size, shuffle=True)

  # 2. create neural network
  print("\nCreating 16-(10-10)-1 NN classifier \n")
  net = Net().to(device)
  net.train()  # set training mode

  # 3. train network
  lrn_rate = 0.01
  loss_func = T.nn.BCELoss()  # binary cross entropy
  # loss_func = T.nn.MSELoss()
  optimizer = T.optim.SGD(net.parameters(), lr=lrn_rate)
  max_epochs = 500
  ep_log_interval = 100

  print("Loss function: " + str(loss_func))
  print("Optimizer: " + str(optimizer.__class__.__name__))
  print("Learn rate: " + "%0.3f" % lrn_rate)
  print("Batch size: " + str(bat_size))
  print("Max epochs: " + str(max_epochs))

  print("\nStarting training")
  for epoch in range(0, max_epochs):
    epoch_loss = 0.0  # for one full epoch
    for (batch_idx, batch) in enumerate(train_ldr):
      X = batch[0]   # [bs,16] inputs
      Y = batch[1]   # [bs,1] targets
      oupt = net(X)  # [bs,1] computeds

      loss_val = loss_func(oupt, Y)  # a tensor
      epoch_loss += loss_val.item()  # accumulate
      optimizer.zero_grad()  # reset all gradients
      loss_val.backward()    # compute new gradients
      optimizer.step()       # update all weights

    if epoch % ep_log_interval == 0:
      print("epoch = %4d   loss = %8.4f" % \
        (epoch, epoch_loss))
  print("Done ")

  # -------------------------------------------------------

  # 4. evaluate model
  net.eval()
  metrics_train = metrics(net, train_ds, thresh=0.5)
  print("\nMetrics for train data: ")
  print("accuracy  = %0.4f " % metrics_train[0])
  print("precision = %0.4f " % metrics_train[1])
  print("recall    = %0.4f " % metrics_train[2])
  print("F1        = %0.4f " % metrics_train[3])

  metrics_test = metrics(net, test_ds, thresh=0.5)
  print("\nMetrics for test data: ")
  print("accuracy  = %0.4f " % metrics_test[0])
  print("precision = %0.4f " % metrics_test[1])
  print("recall    = %0.4f " % metrics_test[2])
  print("F1        = %0.4f " % metrics_test[3])

  # 5. save model
  print("\nSaving trained model state_dict ")
  net.eval()
  # path = ".\\Models\\voting_model.pt"
  # T.save(net.state_dict(), path)

  # 6. make a prediction
  print("\nSetting dummy voting data ")
  x = np.array([[1,1,1,1, 0,0,0,0, 1,1,1,1, 0,0,0,0]],
    dtype=np.float32)
  print(x)
  x = T.tensor(x, dtype=T.float32).to(device)

  net.eval()
  with T.no_grad():
    oupt = net(x)  # a Tensor
  pred_prob = oupt.item()  # scalar, [0.0, 1.0]
  print("\nComputed output: ", end="")
  print("%0.4f" % pred_prob)

  if pred_prob < 0.5:
    print("Prediction = Democrat ")
  else:
    print("Prediction = Republican ")

  print("\nEnd Voting classification demo ")

if __name__ == "__main__":
  main()
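One design note: because the network applies sigmoid() on the output node, BCELoss() is the appropriate loss function. A common variant, not used in the demo, is to emit raw logits and use BCEWithLogitsLoss(), which fuses the sigmoid and the cross entropy computation for better numerical stability. A minimal sketch of just the changed parts:

# variant sketch: logits output + BCEWithLogitsLoss
import torch as T

class NetLogits(T.nn.Module):
  def __init__(self):
    super(NetLogits, self).__init__()
    self.hid1 = T.nn.Linear(16, 10)
    self.hid2 = T.nn.Linear(10, 10)
    self.oupt = T.nn.Linear(10, 1)

  def forward(self, x):
    z = T.tanh(self.hid1(x))
    z = T.tanh(self.hid2(z))
    return self.oupt(z)  # raw logit -- no sigmoid here

net = NetLogits()
loss_func = T.nn.BCEWithLogitsLoss()  # applies sigmoid internally
# at prediction time, apply sigmoid explicitly:
# pred_prob = T.sigmoid(net(x)).item()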
Training data:
# votes_train.txt
# 1st col = democrat (0), republican (1)
# next 16 cols = no vote (0) or yes (1)
# 200 rows (32 in test)
#
0,0,1,1,0,1,1,0,0,0,0,0,0,1,1,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,0,1,0,0,0,1,1
0,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,1
1,1,0,0,1,1,0,1,1,1,0,0,1,1,1,0,1
0,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0
0,1,1,1,0,0,0,1,1,1,1,0,0,1,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,1,0,1,0,0,0,1,1,1,1,1,0,1,0,1,1
0,1,0,1,0,0,0,1,1,1,0,0,0,0,0,0,1
0,1,0,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,0,1
0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,1
0,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,1
1,1,1,0,1,1,1,0,0,0,1,0,1,1,1,0,0
1,0,1,0,1,1,1,0,0,0,1,1,1,1,1,0,0
1,0,1,0,1,1,1,0,0,0,1,1,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0
0,1,1,1,0,0,0,1,1,1,0,1,0,0,0,0,1
1,1,1,0,1,1,1,1,0,0,0,0,1,1,1,0,1
1,0,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,1,1
1,1,1,1,1,0,0,1,1,1,1,1,0,0,1,0,1
1,1,0,1,1,1,0,1,0,1,1,0,0,1,1,0,1
0,1,0,1,0,0,1,1,1,1,1,1,0,0,1,1,1
0,0,1,1,1,1,1,0,0,0,1,1,0,1,1,0,0
0,0,1,1,1,1,1,0,1,1,1,1,1,1,1,0,1
0,1,1,1,0,1,1,0,0,0,1,1,0,1,1,0,1
1,0,0,0,1,1,0,0,0,0,1,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
0,0,0,1,0,1,1,0,0,0,1,1,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
0,0,1,1,0,1,1,1,0,1,1,1,0,1,1,0,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,1,1,0,0,0,1,1
0,1,0,1,0,0,0,1,0,1,1,1,0,0,0,1,1
0,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,1
0,1,0,0,0,1,1,1,0,0,1,1,0,0,1,0,1
0,1,1,1,0,0,1,1,1,1,1,0,0,0,0,0,1
0,1,0,0,0,1,1,0,0,0,0,1,1,0,1,0,1
0,1,0,1,0,1,1,1,0,0,0,1,0,0,1,0,1
0,1,1,1,0,0,0,0,1,1,0,1,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,0,0,1,0,0,0,1,1,1,1,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,0,1,0,0,0,1,1
1,1,1,1,1,1,0,1,0,0,0,0,1,1,1,0,1
0,0,1,1,0,0,0,0,1,1,1,1,0,0,0,1,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,1,0,1,0,1,0,1
0,0,0,1,0,0,1,0,1,1,1,0,0,0,1,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0
1,0,1,0,1,1,1,0,0,0,1,1,1,1,0,0,1
0,0,0,1,0,0,1,1,1,1,1,0,0,0,1,0,1
0,1,0,1,0,0,1,1,1,1,0,0,0,0,0,1,1
1,0,0,0,1,0,0,1,1,1,1,0,0,1,1,0,1
1,0,0,0,1,1,1,1,1,1,1,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,0,0,0,0,0,0,1,1,1,1,0,1,1,1,1,1
1,0,1,0,1,1,1,0,0,0,1,1,1,1,1,0,1
0,0,0,1,0,0,0,1,1,1,1,0,0,1,0,1,1
1,1,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
0,0,1,1,0,0,1,0,1,1,1,1,0,1,0,1,1
0,0,0,1,0,0,1,1,1,1,1,1,0,1,1,0,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,1,1,0,1,1,1,1,0,0,0,0,1,1,1,0,0
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,0,1,0,0,1,1,0,0,0,0,0,1,1,1,1,1
0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1
0,0,1,1,0,1,1,1,0,0,0,1,1,1,1,0,1
1,0,1,0,1,1,1,1,0,0,0,0,1,1,1,0,1
1,1,0,1,1,1,1,1,1,0,1,0,1,0,1,1,1
1,1,0,1,1,1,1,1,1,0,1,1,1,0,1,1,1
0,1,0,1,0,0,0,1,1,1,1,1,0,0,1,0,1
0,0,0,0,0,1,1,0,0,0,1,1,1,1,1,0,1
0,0,1,1,0,0,0,1,1,1,1,0,0,0,0,1,1
1,0,0,1,1,0,0,1,1,1,1,0,0,0,1,1,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,1,1
0,0,0,1,0,0,0,1,1,1,1,1,0,0,0,1,1
0,0,0,1,0,0,0,1,1,1,1,1,0,0,0,1,1
0,0,1,1,0,0,0,1,1,1,1,1,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,0,0,0,0,0,1,1,1,1,0,1,0,0,1,1,1
0,0,0,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,0,0,1,0,0,0,1,1,1,0,0,0,0,1,1,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,1,0,1,0,0,1,1,1,1,1,1,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,1,1
1,0,0,1,1,1,1,1,0,0,0,0,1,1,1,0,1
0,0,0,1,0,0,1,1,1,1,1,0,1,0,0,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,0,1,0,1
0,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0,1
0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
0,0,0,1,0,0,0,1,1,1,0,1,0,0,0,1,1
0,0,1,1,0,0,1,0,1,1,0,1,0,1,0,1,1
1,1,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,0,1,1,0,0,0,0,1,1,0,1,0,0,1,1,1
1,0,0,0,1,1,0,0,0,0,0,0,1,1,1,0,1
0,0,0,1,0,0,1,1,1,1,0,1,0,0,1,1,1
1,0,1,1,1,1,1,1,0,1,1,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,0,1,0,1
0,0,0,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,1,0,1,0,0,1,1,1,0,0,0,1,1,0,0,1
1,0,0,0,1,1,1,1,0,0,1,0,0,0,1,1,1
0,1,0,1,0,0,0,1,1,1,1,1,0,0,1,1,1
0,1,0,1,0,0,0,0,1,1,1,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,1,1,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,0
0,1,1,1,0,0,1,1,1,1,0,0,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,0,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,1,1,1
1,0,0,0,1,1,0,0,0,0,0,0,1,0,1,0,0
0,0,0,1,0,0,0,1,1,1,0,1,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,0,0,0,0,0,0,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,0,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,0,1,0,1
1,1,0,0,0,0,0,1,1,1,1,0,0,0,1,0,1
0,1,0,1,0,0,0,1,1,1,0,0,0,0,0,0,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,1,1
1,1,0,0,1,1,0,1,0,0,1,0,0,0,1,1,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0
1,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,0,0,1,0,0,0,1,1,1,1,0,0,0,1,0,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,0,0,1,0,0,1,1,1,1,1,1,0,0,0,1,1
0,1,1,1,0,1,1,0,1,0,1,1,0,1,1,1,1
0,1,0,1,0,0,1,1,1,0,1,1,0,1,1,1,1
0,1,1,1,0,0,1,1,1,1,1,1,0,1,1,1,1
1,0,0,1,1,1,1,0,0,0,1,0,1,1,1,1,1
0,0,1,0,0,0,0,1,1,1,1,1,0,0,0,1,1
0,0,1,1,0,0,1,1,1,1,1,0,0,1,1,1,1
1,0,0,0,1,1,0,1,1,1,1,0,1,1,1,0,1
1,0,0,0,1,1,1,1,0,0,1,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,1,0,0,0,0,1,1,1,1,1,0,0,0,1,1,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,1,0
0,0,0,1,0,0,1,1,1,1,1,0,0,1,0,0,1
0,1,1,1,0,0,0,1,1,1,1,0,0,0,0,1,1
1,0,1,1,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,1,0,1,1,1,1,1,0,0,1,1,1,1,1,1
0,0,0,0,0,0,1,0,1,1,0,1,1,1,1,1,0
0,1,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1
0,0,1,1,0,0,1,0,1,1,1,0,0,1,1,0,1
0,1,1,1,0,0,0,1,1,1,1,0,0,1,0,0,1
1,0,1,0,1,1,1,0,0,0,0,1,1,1,1,0,0
0,1,1,0,1,0,0,1,1,1,0,1,0,0,1,0,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,1,1,1,0,0,0,1,1,1,0,1,0,0,0,0,1
1,0,1,0,1,1,1,0,0,0,1,0,1,1,1,0,0
0,0,0,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,0,0,0,0,0,1,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,1,1,1
1,1,0,0,1,1,1,0,0,0,0,1,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,1,1,1,0,1,0,1
1,0,0,0,1,1,0,1,0,1,1,0,0,0,1,0,1
0,0,0,1,0,0,0,1,1,1,1,1,0,0,0,1,1
1,0,0,0,1,1,1,1,0,0,1,0,1,0,1,1,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,1,0,0,1,1,1,0,0,0,1,0,1,1,1,0,0
1,0,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1
0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,1,1
1,0,0,1,1,1,1,1,1,1,1,0,1,1,1,1,1
1,0,0,1,1,1,1,1,0,0,1,1,1,1,1,0,1
1,1,0,1,1,0,0,0,1,1,1,0,0,0,1,1,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
0,1,0,1,0,0,1,1,1,1,1,0,0,1,0,0,1
0,1,1,1,0,0,1,1,1,1,1,1,1,1,0,0,1
1,1,1,0,1,1,1,0,0,0,1,1,0,1,0,0,0
1,1,1,0,1,1,1,0,0,0,0,1,0,1,1,0,1
0,0,1,0,0,1,1,0,0,0,1,1,0,1,1,0,0
0,0,1,1,0,0,1,1,1,0,1,0,0,0,0,1,1
1,0,1,0,1,1,1,0,0,0,0,0,0,1,1,0,1
1,0,1,0,1,1,1,0,0,0,0,0,1,1,1,0,1
Test data:
0,0,1,0,1,1,1,0,0,0,0,1,1,0,1,0,0
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,1,1,1,0,1,1,0,1,1,1,1,0,0,0,0,1
0,1,1,1,1,1,1,0,0,0,0,1,1,1,1,0,1
0,1,1,0,0,1,1,0,0,0,0,1,1,1,1,1,0
0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,1,0
1,1,1,0,1,1,1,0,0,0,0,1,1,1,1,0,1
0,1,1,1,0,1,1,0,1,0,0,1,0,1,0,1,1
0,0,1,1,0,1,1,0,1,0,0,0,0,0,0,0,1
1,0,1,0,1,1,1,0,0,0,1,1,1,1,1,0,0
1,1,1,0,1,1,1,0,0,0,1,0,1,1,1,0,1
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,1
0,1,0,1,0,1,1,0,0,1,1,0,0,1,1,0,1
0,0,0,0,1,1,1,0,0,0,0,1,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0
1,0,0,0,1,1,1,0,0,0,0,1,1,1,1,0,1
0,1,0,1,0,0,1,1,1,1,1,1,0,0,0,0,1
1,0,0,0,1,1,1,0,0,0,1,0,1,1,1,0,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,0,1
1,1,1,0,1,1,1,0,0,0,1,0,0,1,1,0,1
0,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,1
0,1,1,1,0,0,0,1,1,0,1,0,0,0,0,0,1
0,1,1,1,0,0,0,1,1,1,0,0,0,0,0,0,1
1,1,1,1,1,1,1,1,1,0,1,0,0,1,1,0,1
0,0,1,1,0,1,1,1,1,0,0,1,0,1,0,1,1
0,0,0,1,0,0,1,1,1,1,0,1,0,0,0,1,1
0,0,1,1,0,0,1,1,1,1,0,1,0,0,1,1,1
0,1,0,1,0,0,0,1,1,1,1,0,0,0,0,1,1
1,0,0,0,1,1,1,1,1,0,1,0,1,1,1,0,1
1,0,0,1,1,1,1,0,0,1,1,0,1,1,1,0,1
0,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,1