#!/usr/bin/python
# -*- coding: UTF-8 -*-
# @date: 2017/12/23 23:28
# @name: first_tf_1223
# @author:vickey-wu

from __future__ import print_function
import tensorflow as tf
import os

# suppress TensorFlow INFO and WARNING log messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# constant
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0)  # node2 is implicitly tf.float32 as well
print(node1, node2)  # prints the Tensor objects, not their values

# Session
sess = tf.Session()  # a Session encapsulates the environment in which ops are executed
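# Running nodes inside the session yields their actual values (the print above
# only shows the Tensor objects themselves).
print(sess.run([node1, node2]))  # [3.0, 4.0]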

# placeholder
a = tf.placeholder(tf.float32)  # A placeholder is a promise to provide a value later
b = tf.placeholder(tf.float32)
adder_node = a + b
print(sess.run(adder_node, {a: 3, b: 4.5}))  # fetches=adder_node, feed_dict supplies the placeholder values
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))  # placeholders can also be fed lists (element-wise add)
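
# Ops compose like ordinary expressions; as a small extension of the example
# above, the adder node can feed into a further multiplication.
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b: 4.5}))  # (3 + 4.5) * 3 = 22.5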

# Variable
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
init = tf.global_variables_initializer()  # tf.Variable must be explicitly initialized, unlike tf.constant
sess.run(init)
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))  # evaluates W * x + b for each value of x

# a loss function measures how far the model's predictions are from the desired values
y = tf.placeholder(tf.float32)  # desired values
squared_deltas = tf.square(linear_model - y)  # creates a vector of squared error deltas
loss = tf.reduce_sum(squared_deltas)  # creates a single scalar that sums the error over all examples
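# With the initial W=0.3 and b=-0.3 the predictions are [0, 0.3, 0.6, 0.9], so
# the summed squared error printed below should come out to roughly 23.66.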
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

# manually reassign W and b to the values that fit this data exactly
fixW = tf.assign(W, [-1.])  # tf.assign changes the value of an already-initialized Variable
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
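# With W=-1 and b=1 the model fits the training data exactly, so the loss
# printed above drops to 0.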

# tf.train API
# the goal of machine learning is to find the correct model parameters automatically
# TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function
# The simplest optimizer is gradient descent
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init)  # reset W and b to their initial (incorrect) values
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))
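# After 1000 gradient-descent steps, W and b should be very close to the
# optimal values of -1 and 1 found by hand above.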

###########################
# complete trainable linear regression model
# model parameters
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
# model input and output
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
linear_model = W * x + b

# loss
loss = tf.reduce_sum(tf.square(linear_model - y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))
#########################

##########################
import numpy as np
# import tensorflow as tf  # already imported above

# Declare list of features
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
# an estimator is the front end to invoke training and evaluation.
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# TensorFlow provides many helper methods to read and set up data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True
)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False
)

# We can invoke 1000 training steps by calling the train method and passing the training data set.
estimator.train(input_fn=input_fn, steps=1000)

# Here we evaluate how well our model did.
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r" % train_metrics)
print("eval metrics: %r" % eval_metrics)

#######################