# 构建你的独家TensorFlow模型

## 定义计算图

class Model:
    """Softmax regression model with the whole graph built in the constructor.

    Exposes two read-only tensors/ops:
      - ``optimize``: RMSProp training op minimizing cross entropy.
      - ``error``: fraction of misclassified examples.
    """

    def __init__(self, data, target):
        # Infer input and output widths from the placeholder shapes.
        data_size = int(data.get_shape()[1])
        target_size = int(target.get_shape()[1])
        weight = tf.Variable(tf.truncated_normal([data_size, target_size]))
        bias = tf.Variable(tf.constant(0.1, shape=[target_size]))
        incoming = tf.matmul(data, weight) + bias
        prediction = tf.nn.softmax(incoming)
        # BUG FIX: the original wrote reduce_sum(target, tf.log(prediction)),
        # passing the log tensor as the `axis` argument. Cross entropy is
        # -sum(target * log(prediction)).
        cross_entropy = -tf.reduce_sum(target * tf.log(prediction))
        self._optimize = tf.train.RMSPropOptimizer(0.03).minimize(cross_entropy)
        mistakes = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
        self._error = tf.reduce_mean(tf.cast(mistakes, tf.float32))

    @property
    def optimize(self):
        """Training operation built in the constructor."""
        return self._optimize

    @property
    def error(self):
        """Scalar tensor: mean error rate on the current batch."""
        return self._error

在 TensorFlow 中，模型基本上就是这样定义的。但是，这种写法有一些问题。最值得注意的是，整个计算图都定义在单一的函数——构造函数——之中。这样的代码既不易阅读，也难以复用。

## 使用属性

class Model:
    """Softmax regression model split into cached ``@property`` graph parts.

    Each property builds its sub-graph on first access and caches the
    resulting tensor/op in a ``_``-prefixed attribute, so the graph nodes
    are created only once.
    """

    def __init__(self, data, target):
        self.data = data
        self.target = target
        # Caches for the lazily-built graph pieces.
        self._prediction = None
        self._optimize = None
        self._error = None

    @property
    def prediction(self):
        """Softmax output tensor; built on first access."""
        if not self._prediction:
            data_size = int(self.data.get_shape()[1])
            target_size = int(self.target.get_shape()[1])
            weight = tf.Variable(tf.truncated_normal([data_size, target_size]))
            bias = tf.Variable(tf.constant(0.1, shape=[target_size]))
            incoming = tf.matmul(self.data, weight) + bias
            self._prediction = tf.nn.softmax(incoming)
        return self._prediction

    @property
    def optimize(self):
        """RMSProp training op minimizing cross entropy; built on first access."""
        if not self._optimize:
            # BUG FIX: the original wrote reduce_sum(self.target, tf.log(...)),
            # passing the log tensor as the `axis` argument. Cross entropy is
            # -sum(target * log(prediction)).
            cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
            optimizer = tf.train.RMSPropOptimizer(0.03)
            self._optimize = optimizer.minimize(cross_entropy)
        return self._optimize

    @property
    def error(self):
        """Scalar tensor: mean error rate; built on first access."""
        if not self._error:
            mistakes = tf.not_equal(
                tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
            self._error = tf.reduce_mean(tf.cast(mistakes, tf.float32))
        return self._error

## 延迟属性构建

Python 是一种非常灵活的语言。让我来告诉你如何从刚才的例子中剔除冗余代码。我们将使用一个行为类似 @property、但只对函数求值一次的装饰器。它把结果存储在一个以函数名命名（带下划线前缀）的成员属性中，并在后续任何调用时直接返回该值。如果你还没有用过自定义装饰器，你可能也想先看看相关的装饰器指南。

import functools


def lazy_property(function):
    """Decorator: evaluate *function* once per instance and cache the result.

    Works like ``@property``, except the wrapped function runs only on the
    first access. The value is stored on the instance in an attribute named
    after the function with a leading underscore; every later access returns
    the stored value without re-running the body.
    """
    cache_name = '_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        # EAFP: fetch the cached value, computing and storing it on a miss.
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = function(self)
            setattr(self, cache_name, value)
            return value
    return wrapper

class Model:
    """Softmax regression model using ``@lazy_property`` for graph parts.

    Accessing each decorated method builds its sub-graph once and caches the
    tensor/op on the instance; the constructor touches all three so the full
    graph exists immediately after construction.
    """

    def __init__(self, data, target):
        self.data = data
        self.target = target
        # Touch the properties so all graph nodes are created up front.
        self.prediction
        self.optimize
        self.error

    @lazy_property
    def prediction(self):
        """Softmax output tensor of a single linear layer."""
        data_size = int(self.data.get_shape()[1])
        target_size = int(self.target.get_shape()[1])
        weight = tf.Variable(tf.truncated_normal([data_size, target_size]))
        bias = tf.Variable(tf.constant(0.1, shape=[target_size]))
        incoming = tf.matmul(self.data, weight) + bias
        return tf.nn.softmax(incoming)

    @lazy_property
    def optimize(self):
        """RMSProp training op minimizing cross entropy."""
        # BUG FIX: the original wrote reduce_sum(self.target, tf.log(...)),
        # passing the log tensor as the `axis` argument. Cross entropy is
        # -sum(target * log(prediction)).
        cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
        optimizer = tf.train.RMSPropOptimizer(0.03)
        return optimizer.minimize(cross_entropy)

    @lazy_property
    def error(self):
        """Scalar tensor: mean error rate on the current batch."""
        mistakes = tf.not_equal(
            tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32))