TensorFlow 2.0: Building a Simple Classifier Using Low-Level APIs
At the end of my comparison (TensorFlow 1.14's Keras API versus Julia's Flux.jl and Knet.jl high-level APIs), I indicated some future write-ups I plan to do, one of which is, obviously, a comparison of the low-level APIs. However, with the release of the much-anticipated TensorFlow 2.0, I decided not to wait for that comparison and instead dedicate a separate article to this release. The goal of this blog post, then, is simply to replicate the modeling in my previous article without using the Keras API. Specifically, I'll discuss how to construct a custom layer, how to chain multiple layers as in Keras' Sequential, how to do the feedforward and backward passes (computing the gradients and updating the weights), and how to batch the training dataset.
Load the Libraries
To start with, load the necessary libraries by running the following code:
import numpy as np
import pandas as pd
import tensorflow as tf
from enum import Enum
from sklearn.datasets import load_iris
from typing import Callable, Iterable, List, Tuple
Define the Constants
The following are the constants and hyperparameters that we will use throughout the succeeding code:
class HyperParams(Enum):
    ACTIVATION = tf.nn.relu
    BATCH_SIZE = 5
    EPOCHS = 500
    HIDDEN_NEURONS = 10
    NORMALIZER = tf.nn.softmax
    OUTPUT_NEURONS = 3
    OPTIMIZER = tf.keras.optimizers.Adam
We use the Rectified Linear Unit (tf.nn.relu) as the activation for the hidden layer and softmax (tf.nn.softmax) for the output layer. Other parameters include the batch size, the number of epochs, the number of hidden and output neurons, and the optimizer, which in this case is Adam (tf.keras.optimizers.Adam). Feel free to change the items above; for example, you can explore different optimizers or activation functions.
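For instance, the following is a purely illustrative variant (the name AltHyperParams and the specific choices are mine and are not used anywhere else in this post) that swaps in the hyperbolic tangent activation and plain stochastic gradient descent:

class AltHyperParams(Enum):
    ACTIVATION = tf.nn.tanh                # tanh instead of ReLU
    BATCH_SIZE = 5
    EPOCHS = 500
    HIDDEN_NEURONS = 10
    NORMALIZER = tf.nn.softmax
    OUTPUT_NEURONS = 3
    OPTIMIZER = tf.keras.optimizers.SGD    # plain SGD instead of Adam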
Load the Data
The Iris dataset is available in Python’s Scikit-Learn library and can be loaded as follows:
iris = load_iris()
xdat = iris.data
ydat = iris.target
xdat and ydat are both np.ndarray objects, and we are going to partition these into training and testing datasets using the Data class defined in the next section.
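As a quick sanity check (these lines are not part of the original listing), we can inspect the arrays before partitioning them:

print(xdat.shape)        # (150, 4) -- 150 samples with 4 features each
print(ydat.shape)        # (150,)
print(np.unique(ydat))   # [0 1 2] -- three iris species, integer encoded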
Define the Data Class
To organize the data processing, below is the class with methods for partitioning, tensorizing, and batching the input data:
class Data:
    def __init__(self, xdat: np.ndarray, ydat: np.ndarray, ratio: float = 0.3) -> None:
        self.xdat = xdat
        self.ydat = ydat
        self.ratio = ratio

    def partition(self) -> None:
        # stratified split: sample the same number of test indices from each class
        scnt = self.xdat.shape[0] / np.unique(self.ydat).shape[0]
        ntst = int(self.xdat.shape[0] * self.ratio / np.unique(self.ydat).shape[0])
        idx = np.random.choice(np.arange(0, self.ydat.shape[0] / np.unique(self.ydat).shape[0], dtype = int), ntst, replace = False)
        for i in np.arange(1, np.unique(self.ydat).shape[0]):
            idx = np.concatenate((idx, np.random.choice(np.arange(scnt * i, scnt * (i + 1), dtype = int), ntst, replace = False)))
        self.xtrn = self.xdat[np.where(~np.in1d(np.arange(0, self.ydat.shape[0]), idx))[0], :]
        self.ytrn = self.ydat[np.where(~np.in1d(np.arange(0, self.ydat.shape[0]), idx))[0]]
        self.xtst = self.xdat[idx, :]
        self.ytst = self.ydat[idx]

    def to_tensor(self, depth: int = 3) -> None:
        # convert the arrays to tensors and one-hot encode the targets
        self.xtrn = tf.convert_to_tensor(self.xtrn, dtype = np.float32)
        self.xtst = tf.convert_to_tensor(self.xtst, dtype = np.float32)
        self.ytrn = tf.convert_to_tensor(tf.one_hot(self.ytrn, depth = depth))
        self.ytst = tf.convert_to_tensor(tf.one_hot(self.ytst, depth = depth))

    def batch(self, num: int = 16) -> None:
        # split the training and testing sets into `num` (approximately equal) batches
        try:
            size = self.xtrn.shape[0] / num
            if self.xtrn.shape[0] % num != 0:
                sizes = [tf.floor(size).numpy().astype(int) for i in range(num)] + [self.xtrn.shape[0] % num]
            else:
                sizes = [tf.floor(size).numpy().astype(int) for i in range(num)]
            self.xtrn_batches = tf.split(self.xtrn, num_or_size_splits = sizes, axis = 0)
            self.ytrn_batches = tf.split(self.ytrn, num_or_size_splits = sizes, axis = 0)
            num = int(self.xtst.shape[0] / sizes[0])
            if self.xtst.shape[0] % sizes[0] != 0:
                sizes = [sizes[i] for i in range(num)] + [self.xtst.shape[0] % sizes[0]]
            else:
                sizes = [sizes[i] for i in range(num)]
            self.xtst_batches = tf.split(self.xtst, num_or_size_splits = sizes, axis = 0)
            self.ytst_batches = tf.split(self.ytst, num_or_size_splits = sizes, axis = 0)
        except:
            # fall back to a single batch if the split fails
            self.xtrn_batches = [self.xtrn]
            self.ytrn_batches = [self.ytrn]
            self.xtst_batches = [self.xtst]
            self.ytst_batches = [self.ytst]
Data Cleaning/Processing
We then apply the above class to the Iris dataset as shown below:
data = Data(xdat, ydat)
data.partition()
data.to_tensor()
data.batch(HyperParams.BATCH_SIZE.value)
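To verify the split, the following snippet (not in the original post) inspects the resulting objects; with the default ratio of 0.3 and BATCH_SIZE = 5, we expect a 105/45 train/test split and five training batches of 21 rows each:

print(data.xtrn.shape)          # expected: (105, 4)
print(data.xtst.shape)          # expected: (45, 4)
print(data.ytrn.shape)          # expected: (105, 3), one-hot encoded targets
print(len(data.xtrn_batches))   # expected: 5
print(len(data.xtst_batches))   # expected: 3 (two chunks of 21 rows plus a remainder of 3)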
Defining the Dense Layer
The model, as mentioned in my previous article, is a MultiLayer Perceptron (MLP) with one hidden layer; please refer to that article for more details. In order to use the low-level APIs, we need to define the Dense layer from scratch. As a guide, below is the mathematical formulation of the layer:
y = σ(x ⋅ W + b),

where x is the input, W the weight matrix, b the bias vector, and σ the activation function (ReLU for the hidden layer and softmax for the output layer).
Translating this into code, we have the following:
class Dense:
    def __init__(self, i: int, o: int, f: Callable[[tf.Tensor], tf.Tensor], initializer: Callable = tf.random.normal) -> None:
        # i: number of input neurons, o: number of output neurons, f: activation function
        self.w = tf.Variable(initializer([i, o]))
        self.b = tf.Variable(initializer([o]))
        self.f = f

    def __call__(self, x: tf.Tensor) -> tf.Tensor:
        # feedforward: apply the activation (if any) to the affine transformation
        if callable(self.f):
            return self.f(tf.add(tf.matmul(x, self.w), self.b))
        else:
            return tf.add(tf.matmul(x, self.w), self.b)
The constructor of the Dense layer initializes the weights and biases by assigning them starting values drawn from the initializer, and the __call__ method defines the feedforward. Further, if there is something special in the above code, it's tf.Variable. This class tells TensorFlow that the object is learnable during model optimization. Lastly, I want to emphasize that prior to TensorFlow 2.0, tf.random.normal was named tf.random_normal.
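To see what "learnable" means in practice, here is a minimal, self-contained sketch (independent of the Iris model) showing that a tf.Variable is mutable and is tracked for differentiation, unlike a plain tensor:

w = tf.Variable([[1.0, 2.0]])   # learnable: watched by tf.GradientTape by default
c = tf.constant([[1.0, 2.0]])   # not learnable: a fixed tensor
w.assign_add([[0.5, 0.5]])      # variables can be updated in place
print(w.numpy())                # [[1.5 2.5]]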
Feedforward
We can now use the Dense layer to perform feedforward computation as follows:
layer = Dense(4, 2, tf.nn.relu)
layer(data.xtrn[1:2, :])
#> tf.Tensor([[12.937485  0.       ]], shape=(1, 2), dtype=float32)
layer(data.xtrn[1:5, :])
#> tf.Tensor(
#> [[12.937484  0.       ]
#>  [12.557415  0.       ]
#>  [13.761768  0.       ]
#>  [14.996015  0.       ]], shape=(4, 2), dtype=float32)
The above values are the outputs of feedforward computation on a single row and on several rows of data.xtrn as input. Of course, these values may differ on your machine, since we did not specify a seed for the initial weights and biases.
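If reproducible initial weights are desired, one option (my own suggestion, not something done in this post) is to fix the seeds before partitioning the data and constructing the layers:

np.random.seed(42)                # fixes the random train/test partition in Data.partition
tf.random.set_seed(42)            # fixes the tf.random.normal weight initialization
layer = Dense(4, 2, tf.nn.relu)   # now returns the same initial weights on every run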
Defining the Chain
Now that we know how to initialize a single Dense layer, we need to extend it to multiple layers to come up with an MLP architecture. Hence, what we need now is a Chain of layers. In short, we need to replicate (though not all attributes and methods of) Keras' Sequential, and this is done as follows:
class Chain:
    def __init__(self, layers: List[Dense]) -> None:
        self.layers = layers

    def __call__(self, x: tf.Tensor) -> tf.Tensor:
        # feedforward: pass the input through each layer and collect the parameters
        self.out = x; self.params = []
        for l in self.layers:
            self.out = l(self.out)
            self.params.append([l.w, l.b])
        self.params = [j for i in self.params for j in i]
        return self.out

    def backward(self, inputs: tf.Tensor, targets: tf.Tensor) -> None:
        grads = self.grad(inputs, targets)
        self.optimize(grads, 0.001)

    def loss(self, preds: tf.Tensor, targets: tf.Tensor) -> tf.Tensor:
        return tf.reduce_mean(
            tf.keras.losses.categorical_crossentropy(
                targets, preds
            )
        )

    def grad(self, inputs: tf.Tensor, targets: tf.Tensor) -> List:
        # record the forward pass and differentiate the loss w.r.t. the parameters
        with tf.GradientTape() as g:
            error = self.loss(self(inputs), targets)
        return g.gradient(error, self.params)

    def optimize(self, grads: List[tf.Tensor], rate: float) -> None:
        opt = HyperParams.OPTIMIZER.value(learning_rate = rate)
        opt.apply_gradients(zip(grads, self.params))
The Chain class takes a list of Dense layers (stored in self.layers) as input. Once we have that, we can do feedforward computation using the untrained weights by simply calling the object (as defined by __call__).
To train the weights, we need to call the backward method, which does the backpropagation. This is done by optimize-ing the weights using the grad-ients obtained by differentiating the loss function of the model with respect to the learnable parameters (collected in self.params). In TensorFlow 2.0, it is recommended to use the Keras optimizers (tf.keras.optimizers) instead of tf.train.GradientDescentOptimizer. One thing to note as well is the loss function, which is categorical_crossentropy as opposed to the sparse_categorical_crossentropy used in my previous article. The difference is due to the fact that the target variable here is encoded as a one-hot vector (you can confirm this under the to_tensor method of the Data class above), and thus categorical cross entropy is used; with integer-encoded targets, sparse categorical cross entropy is the more appropriate choice.
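The distinction between the two losses can be checked directly. Below is a small sketch (with a made-up prediction) showing that they agree when the target is one-hot encoded versus integer encoded:

preds = tf.constant([[0.7, 0.2, 0.1]])           # a made-up softmax output
onehot_target = tf.constant([[1.0, 0.0, 0.0]])   # class 0, one-hot encoded
int_target = tf.constant([0])                    # the same class as an integer
print(tf.keras.losses.categorical_crossentropy(onehot_target, preds).numpy())
print(tf.keras.losses.sparse_categorical_crossentropy(int_target, preds).numpy())
# both print the same value, -log(0.7) ≈ 0.3567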
Referring back to the code, the backward method simply delegates to the grad method, where we find tf.GradientTape. At first glance it is not obvious what exactly this does, apart from the high-level understanding that it has something to do with the gradient computation (it is, in fact, the main moving part). From the name itself, we can think of it as a "tape recorder" that records the operations performed on the trainable variables (the tf.Variable objects in the Dense layer) so that they can later be differentiated with respect to the loss of the model. That is, when we do the forward operation (self(inputs) inside grad), TensorFlow records it and can then automatically differentiate the loss with respect to the parameters. In order to update the weights under the Adam algorithm (the opt object in optimize), we need these gradients at the given iteration; the recorder (tf.GradientTape) is what we use to extract them.
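As a standalone illustration (independent of the model above), here is a minimal sketch of what the tape records; the derivative of y = x² with respect to x, evaluated at x = 3, is 6:

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * x                  # operations on the Variable are recorded by the tape
dy_dx = tape.gradient(y, x)    # differentiate y with respect to x
print(dy_dx.numpy())           # 6.0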
Finally, we chain the layers as follows:
model = Chain([
    Dense(data.xtrn.shape[1], HyperParams.HIDDEN_NEURONS.value, HyperParams.ACTIVATION),
    Dense(HyperParams.HIDDEN_NEURONS.value, HyperParams.OUTPUT_NEURONS.value, HyperParams.NORMALIZER)
])
This composes two Dense layers (each initialized like the single layer object above) into a chain, and we can call it as follows:
model(data.xtrn[1:2, :])
#> tf.Tensor([[2.8523763e-16 1.8208168e-15 1.0000000e+00]], shape=(1, 3), dtype=float32)
model(data.xtrn[1:5, :])
#> tf.Tensor(
#> [[2.8523763e-16 1.8208168e-15 1.0000000e+00]
#>  [4.9846957e-16 8.1282060e-16 1.0000000e+00]
#>  [6.2472026e-16 1.2082151e-15 1.0000000e+00]
#>  [1.8308374e-17 2.8908239e-17 1.0000000e+00]], shape=(4, 3), dtype=float32)
Training
At this point, we can optimize the parameters by calling the backward method; and because we are not using Keras' Sequential([...]).fit, we can customize the training procedure to suit our needs, starting with a custom definition of the model accuracy:
def accuracy(y, yhat):
    j = 0; correct = []
    for i in tf.argmax(y, 1):
        if i == tf.argmax(yhat[j]):
            correct.append(1)
        j += 1
    num = tf.cast(tf.reduce_sum(correct), dtype = tf.float32)
    den = tf.cast(y.shape[0], dtype = tf.float32)
    return num / den
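As a design note, the loop above can also be written in a fully vectorized form; the following equivalent sketch is my own shorthand and is not from the original post:

def accuracy_vectorized(y, yhat):
    # compare the predicted class (argmax of the softmax output) with the true class
    matches = tf.equal(tf.argmax(y, axis = 1), tf.argmax(yhat, axis = 1))
    return tf.reduce_mean(tf.cast(matches, tf.float32))

Either version can be used in the training loop below, which records the loss and accuracy on both the training and testing batches at every epoch: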
epoch_trn_loss = []
epoch_tst_loss = []
epoch_trn_accy = []
epoch_tst_accy = []
for j in range(HyperParams.EPOCHS.value):
    trn_loss = []; trn_accy = []
    for i in range(len(data.xtrn_batches)):
        model.backward(data.xtrn_batches[i], data.ytrn_batches[i])
        ypred = model(data.xtrn_batches[i])
        trn_loss.append(model.loss(ypred, data.ytrn_batches[i]))
        trn_accy.append(accuracy(data.ytrn_batches[i], ypred))
    trn_err = tf.reduce_mean(trn_loss).numpy()
    trn_acy = tf.reduce_mean(trn_accy).numpy()
    tst_loss = []; tst_accy = []
    for i in range(len(data.xtst_batches)):
        ypred = model(data.xtst_batches[i])
        tst_loss.append(model.loss(ypred, data.ytst_batches[i]))
        tst_accy.append(accuracy(data.ytst_batches[i], ypred))
    tst_err = tf.reduce_mean(tst_loss).numpy()
    tst_acy = tf.reduce_mean(tst_accy).numpy()
    epoch_trn_loss.append(trn_err)
    epoch_tst_loss.append(tst_err)
    epoch_trn_accy.append(trn_acy)
    epoch_tst_accy.append(tst_acy)
    if j % 20 == 0:
        print("Epoch: {0:4d} \t Training Error: {1:.4f} \t Testing Error: {2:.4f} \t Accuracy Training: {3:.4f} \t Accuracy Testing: {4:.4f}".format(j, trn_err, tst_err, trn_acy, tst_acy))
#> Epoch:    0      Training Error: 10.4038     Testing Error: 7.4863     Accuracy Training: 0.3333     Accuracy Testing: 0.5238
#> Epoch:   20      Training Error: 9.3302      Testing Error: 6.7334     Accuracy Training: 0.3333     Accuracy Testing: 0.5238
#> Epoch:   40      Training Error: 8.1914      Testing Error: 5.9173     Accuracy Training: 0.3333     Accuracy Testing: 0.5238
#> Epoch:   60      Training Error: 7.0320      Testing Error: 5.0859     Accuracy Training: 0.3333     Accuracy Testing: 0.5238
#> Epoch:   80      Training Error: 5.9393      Testing Error: 4.2839     Accuracy Training: 0.3619     Accuracy Testing: 0.5238
#> Epoch:  100      Training Error: 5.4461      Testing Error: 3.8967     Accuracy Training: 0.6571     Accuracy Testing: 0.7619
#> Epoch:  120      Training Error: 5.3919      Testing Error: 3.8529     Accuracy Training: 0.6667     Accuracy Testing: 0.7619
#> Epoch:  140      Training Error: 5.3826      Testing Error: 3.8454     Accuracy Training: 0.6667     Accuracy Testing: 0.7619
#> Epoch:  160      Training Error: 5.3793      Testing Error: 3.8427     Accuracy Training: 0.6667     Accuracy Testing: 0.7619
#> Epoch:  180      Training Error: 5.3777      Testing Error: 3.8414     Accuracy Training: 0.6667     Accuracy Testing: 0.7619
Finally, we collect the epoch statistics into a data frame and save them to a CSV file for later analysis and plotting:

df = pd.DataFrame({
    "trn_loss" : epoch_trn_loss,
    "trn_accy" : epoch_trn_accy,
    "tst_loss" : epoch_tst_loss,
    "tst_accy" : epoch_tst_accy
})
df.to_csv("../tf2_output_normal_initializer_batch_size_" + str(HyperParams.BATCH_SIZE.value) + ".csv")
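The complete code at the end of this post excludes the plots; if you want to visualize the recorded curves yourself, a minimal sketch (assuming matplotlib is installed) would be:

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 4))
ax1.plot(df["trn_loss"], label = "training")
ax1.plot(df["tst_loss"], label = "testing")
ax1.set_xlabel("epoch"); ax1.set_ylabel("cross entropy"); ax1.legend()
ax2.plot(df["trn_accy"], label = "training")
ax2.plot(df["tst_accy"], label = "testing")
ax2.set_xlabel("epoch"); ax2.set_ylabel("accuracy"); ax2.legend()
plt.show()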
End Note
That’s it, I have shown you how to do modeling using TensorFlow 2.0’s Core APIs. As an end note, I want to highlight two key points on the importance of using the low-level APIs. The first one, is having full control on your end-to-end modeling process. Having the flexibility on your tools, enables the user to solve problems with custom models, custom objective function, custom optimization algorithms, and whatnot. The second point, is the appreciation of the theory. It is simply fulfilling to see how the theory works in practice, and it gives the user the confidence to experiment, for example on the gradients and other internals.
Lastly, I am pleased with the clean API of the TF 2.0 as opposed to the redundant APIs we have in the previous versions; and with eager-execution as the default configuration, makes the library even more pythonic. Feel free to share your thoughts, if you have comments/suggestions.
Next Steps
In my next article, I will likely start on TensorFlow Probability, which extends the TF core APIs by incorporating a Bayesian approach to modeling and statistical analyses. Otherwise, I will touch on modeling image datasets or present a new topic.
Complete Codes
If you are impatient, here is the complete code excluding the plots. This should work after installing the required libraries:
import numpy as np
import pandas as pd
import tensorflow as tf
from enum import Enum
from sklearn.datasets import load_iris
from typing import Callable, Iterable, List, Tuple

class HyperParams(Enum):
    ACTIVATION = tf.nn.relu
    BATCH_SIZE = 5
    EPOCHS = 500
    HIDDEN_NEURONS = 10
    NORMALIZER = tf.nn.softmax
    OUTPUT_NEURONS = 3
    OPTIMIZER = tf.keras.optimizers.Adam

iris = load_iris()
xdat = iris.data
ydat = iris.target

class Data:
    def __init__(self, xdat: np.ndarray, ydat: np.ndarray, ratio: float = 0.3) -> None:
        self.xdat = xdat
        self.ydat = ydat
        self.ratio = ratio

    def partition(self) -> None:
        # stratified split: sample the same number of test indices from each class
        scnt = self.xdat.shape[0] / np.unique(self.ydat).shape[0]
        ntst = int(self.xdat.shape[0] * self.ratio / np.unique(self.ydat).shape[0])
        idx = np.random.choice(np.arange(0, self.ydat.shape[0] / np.unique(self.ydat).shape[0], dtype = int), ntst, replace = False)
        for i in np.arange(1, np.unique(self.ydat).shape[0]):
            idx = np.concatenate((idx, np.random.choice(np.arange(scnt * i, scnt * (i + 1), dtype = int), ntst, replace = False)))
        self.xtrn = self.xdat[np.where(~np.in1d(np.arange(0, self.ydat.shape[0]), idx))[0], :]
        self.ytrn = self.ydat[np.where(~np.in1d(np.arange(0, self.ydat.shape[0]), idx))[0]]
        self.xtst = self.xdat[idx, :]
        self.ytst = self.ydat[idx]

    def to_tensor(self, depth: int = 3) -> None:
        # convert the arrays to tensors and one-hot encode the targets
        self.xtrn = tf.convert_to_tensor(self.xtrn, dtype = np.float32)
        self.xtst = tf.convert_to_tensor(self.xtst, dtype = np.float32)
        self.ytrn = tf.convert_to_tensor(tf.one_hot(self.ytrn, depth = depth))
        self.ytst = tf.convert_to_tensor(tf.one_hot(self.ytst, depth = depth))

    def batch(self, num: int = 16) -> None:
        # split the training and testing sets into `num` (approximately equal) batches
        try:
            size = self.xtrn.shape[0] / num
            if self.xtrn.shape[0] % num != 0:
                sizes = [tf.floor(size).numpy().astype(int) for i in range(num)] + [self.xtrn.shape[0] % num]
            else:
                sizes = [tf.floor(size).numpy().astype(int) for i in range(num)]
            self.xtrn_batches = tf.split(self.xtrn, num_or_size_splits = sizes, axis = 0)
            self.ytrn_batches = tf.split(self.ytrn, num_or_size_splits = sizes, axis = 0)
            num = int(self.xtst.shape[0] / sizes[0])
            if self.xtst.shape[0] % sizes[0] != 0:
                sizes = [sizes[i] for i in range(num)] + [self.xtst.shape[0] % sizes[0]]
            else:
                sizes = [sizes[i] for i in range(num)]
            self.xtst_batches = tf.split(self.xtst, num_or_size_splits = sizes, axis = 0)
            self.ytst_batches = tf.split(self.ytst, num_or_size_splits = sizes, axis = 0)
        except:
            # fall back to a single batch if the split fails
            self.xtrn_batches = [self.xtrn]
            self.ytrn_batches = [self.ytrn]
            self.xtst_batches = [self.xtst]
            self.ytst_batches = [self.ytst]

data = Data(xdat, ydat)
data.partition()
data.to_tensor()
data.batch(HyperParams.BATCH_SIZE.value)

class Dense:
    def __init__(self, i: int, o: int, f: Callable[[tf.Tensor], tf.Tensor], initializer: Callable = tf.random.normal) -> None:
        # i: number of input neurons, o: number of output neurons, f: activation function
        self.w = tf.Variable(initializer([i, o]))
        self.b = tf.Variable(initializer([o]))
        self.f = f

    def __call__(self, x: tf.Tensor) -> tf.Tensor:
        # feedforward: apply the activation (if any) to the affine transformation
        if callable(self.f):
            return self.f(tf.add(tf.matmul(x, self.w), self.b))
        else:
            return tf.add(tf.matmul(x, self.w), self.b)

class Chain:
    def __init__(self, layers: List[Dense]) -> None:
        self.layers = layers

    def __call__(self, x: tf.Tensor) -> tf.Tensor:
        # feedforward: pass the input through each layer and collect the parameters
        self.out = x; self.params = []
        for l in self.layers:
            self.out = l(self.out)
            self.params.append([l.w, l.b])
        self.params = [j for i in self.params for j in i]
        return self.out

    def backward(self, inputs: tf.Tensor, targets: tf.Tensor) -> None:
        grads = self.grad(inputs, targets)
        self.optimize(grads, 0.001)

    def loss(self, preds: tf.Tensor, targets: tf.Tensor) -> tf.Tensor:
        return tf.reduce_mean(
            tf.keras.losses.categorical_crossentropy(
                targets, preds
            )
        )

    def grad(self, inputs: tf.Tensor, targets: tf.Tensor) -> List:
        # record the forward pass and differentiate the loss w.r.t. the parameters
        with tf.GradientTape() as g:
            error = self.loss(self(inputs), targets)
        return g.gradient(error, self.params)

    def optimize(self, grads: List[tf.Tensor], rate: float) -> None:
        opt = HyperParams.OPTIMIZER.value(learning_rate = rate)
        opt.apply_gradients(zip(grads, self.params))

model = Chain([
    Dense(data.xtrn.shape[1], HyperParams.HIDDEN_NEURONS.value, HyperParams.ACTIVATION),
    Dense(HyperParams.HIDDEN_NEURONS.value, HyperParams.OUTPUT_NEURONS.value, HyperParams.NORMALIZER)
])

def accuracy(y, yhat):
    j = 0; correct = []
    for i in tf.argmax(y, 1):
        if i == tf.argmax(yhat[j]):
            correct.append(1)
        j += 1
    num = tf.cast(tf.reduce_sum(correct), dtype = tf.float32)
    den = tf.cast(y.shape[0], dtype = tf.float32)
    return num / den

epoch_trn_loss = []
epoch_tst_loss = []
epoch_trn_accy = []
epoch_tst_accy = []
for j in range(HyperParams.EPOCHS.value):
    trn_loss = []; trn_accy = []
    for i in range(len(data.xtrn_batches)):
        model.backward(data.xtrn_batches[i], data.ytrn_batches[i])
        ypred = model(data.xtrn_batches[i])
        trn_loss.append(model.loss(ypred, data.ytrn_batches[i]))
        trn_accy.append(accuracy(data.ytrn_batches[i], ypred))
    trn_err = tf.reduce_mean(trn_loss).numpy()
    trn_acy = tf.reduce_mean(trn_accy).numpy()
    tst_loss = []; tst_accy = []
    for i in range(len(data.xtst_batches)):
        ypred = model(data.xtst_batches[i])
        tst_loss.append(model.loss(ypred, data.ytst_batches[i]))
        tst_accy.append(accuracy(data.ytst_batches[i], ypred))
    tst_err = tf.reduce_mean(tst_loss).numpy()
    tst_acy = tf.reduce_mean(tst_accy).numpy()
    epoch_trn_loss.append(trn_err)
    epoch_tst_loss.append(tst_err)
    epoch_trn_accy.append(trn_acy)
    epoch_tst_accy.append(tst_acy)
    if j % 20 == 0:
        print("Epoch: {0:4d} \t Training Error: {1:.4f} \t Testing Error: {2:.4f} \t Accuracy Training: {3:.4f} \t Accuracy Testing: {4:.4f}".format(j, trn_err, tst_err, trn_acy, tst_acy))

df = pd.DataFrame({
    "trn_loss" : epoch_trn_loss,
    "trn_accy" : epoch_trn_accy,
    "tst_loss" : epoch_tst_loss,
    "tst_accy" : epoch_tst_accy
})
df.to_csv("../tf2_output_normal_initializer_batch_size_" + str(HyperParams.BATCH_SIZE.value) + ".csv")
References
- TensorFlow 2.0 API Documentation
- Difference of Sparse Categorical Crossentropy and Categorical Crossentropy
Software Versions
========
Python
========
3.7.4 (v3.7.4:e09359112e, Jul  8 2019, 14:54:52)
[Clang 6.0 (clang-600.0.57)] on darwin