TensorFlow variable creation and management: tf.Variable & tf.get_variable & tf.variable_scope


tz_zs study notes


tf.Variable

Official API docs: https://www.tensorflow.org/api_docs/python/tf/Variable

def __init__(self,
             initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             variable_def=None,
             dtype=None,
             expected_shape=None,
             import_scope=None):

initial_value: the initial value of the variable; it can be a random tensor, a constant, or a value computed from the initial values of other variables.

trainable: whether to add the variable to the GraphKeys.TRAINABLE_VARIABLES collection.

validate_shape: if False, the variable's shape is not fixed and may be changed later.

dtype: the type of the variable; it cannot be changed after construction.
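To make these arguments concrete, here is a minimal sketch (TF 1.x graph mode; the variable names are made up for illustration):

# Sketch (TF 1.x): how trainable, validate_shape and dtype are used.
import tensorflow as tf

# trainable=False keeps the variable out of GraphKeys.TRAINABLE_VARIABLES,
# so optimizers will not update it by default (typical for a step counter).
global_step = tf.Variable(0, trainable=False, name="global_step")

# dtype fixes the variable's type; the initial value is converted to it.
w = tf.Variable(3, dtype=tf.float32, name="w")

# validate_shape=False allows an initial value whose shape is not checked,
# so the stored shape is not fixed at construction time.
flexible = tf.Variable(tf.zeros([1]), validate_shape=False, name="flexible")

print(tf.trainable_variables())  # contains w and flexible, but not global_step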


Before a variable can be used, its initializer must be run in a session to assign the initial value, typically via:

sess.run(tf.global_variables_initializer())


# -*- coding: utf-8 -*-
"""
@author: tz_zs

Variables whose initial values are constants
"""
import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0), name="v2")
v3 = tf.Variable(2.0, name="v3")

result1 = v1 + v2
result2 = v1 + v3

print(v1)  # <tf.Variable 'v1:0' shape=(1,) dtype=float32_ref>
print(v2)  # <tf.Variable 'v2:0' shape=() dtype=float32_ref>
print(v3)  # <tf.Variable 'v3:0' shape=() dtype=float32_ref>
print(result1)  # Tensor("add:0", shape=(1,), dtype=float32)
print(result2)  # Tensor("add_1:0", shape=(1,), dtype=float32)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    print(sess.run(v1))  # [ 1.]
    print(sess.run(v2))  # 2.0
    print(sess.run(v3))  # 2.0
    print(sess.run(result1))  # [ 3.]
    print(sess.run(result2))  # [ 3.]
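As the Variable docstring in the appendix notes, when one variable's initial value depends on another variable, use the other variable's initialized_value() so that initialization happens in the right order. A small sketch of that pattern (the shapes are arbitrary):

# Sketch (TF 1.x): initializing one variable from another via initialized_value().
import tensorflow as tf

v = tf.Variable(tf.truncated_normal([10, 40]), name="v")
# initialized_value() guarantees v has been initialized before its value is used for w.
w = tf.Variable(v.initialized_value() * 2.0, name="w")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).shape)  # (10, 40)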




tf.get_variable & tf.variable_scope

The tf.get_variable function can be used either to create or to retrieve a variable. When it creates a variable, it behaves essentially the same as tf.Variable.
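One practical difference worth noting (a small sketch, TF 1.x, with made-up names): tf.Variable takes a concrete initial value, while tf.get_variable takes an explicit shape plus an initializer function.

# Sketch: tf.Variable takes a concrete initial value,
# tf.get_variable takes a shape plus an initializer function.
import tensorflow as tf

# tf.Variable: the initial value itself defines the shape and dtype.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1.0), name="w1")

# tf.get_variable: the shape is given explicitly, the initializer fills it in.
w2 = tf.get_variable("w2", shape=[2, 3],
                     initializer=tf.random_normal_initializer(stddev=1.0))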

The tf.variable_scope function creates a context manager that controls tf.get_variable: it provides the namespace for variable names and, through its reuse argument, decides whether tf.get_variable creates a new variable or fetches an existing one.

Code 1

# -*- coding: utf-8 -*-
"""
@author: tz_zs

Creating variables with tf.get_variable and tf.variable_scope: the reuse argument
"""
import tensorflow as tf

with tf.variable_scope("a"):
    v1 = tf.get_variable("v", [1], initializer=tf.constant_initializer(1.0))

# with tf.variable_scope("a"):
#     v2 = tf.get_variable("v", [1])   # raises ValueError: Variable a/v already exists

with tf.variable_scope("a", reuse=True):
    v3 = tf.get_variable("v", [1])
    print(v3 == v1)  # True

with tf.variable_scope("b", reuse=True):
    v4 = tf.get_variable("v",
                         [1])  # raises ValueError: Variable b/v does not exist, or was not created with tf.get_variable().

Code 2

# -*- coding: utf-8 -*-
"""
@author: tz_zs

tf.get_variable and tf.variable_scope: namespaces
"""
import tensorflow as tf

v1 = tf.get_variable("a", [1])
print(v1.name)

with tf.variable_scope("foo"):
    v2 = tf.get_variable("a", [1])
    print(v2.name)

with tf.variable_scope("foo"):
    with tf.variable_scope("bar"):
        v3 = tf.get_variable("a", [1])
        print(v3.name)

    v4 = tf.get_variable("b", [1])
    print(v4.name)

# with tf.variable_scope(""):
#     v8 = tf.get_variable("a", [1])  # raises an error: an empty scope name is equivalent to the top level,
#                                     # where "a" (v1) already exists; reuse is None here, so it cannot be reused.
#     print(v8.name)

with tf.variable_scope("", reuse=True):
    v5 = tf.get_variable("foo/bar/a", [1])
    print(v5.name)
    print(v3.name)
    print(v5 == v3)

    v6 = tf.get_variable("a", [1])
    print(v6.name)
    print(v1.name)
    print(v6 == v1)
Output:

a:0
foo/a:0
foo/bar/a:0
foo/b:0
foo/bar/a:0
foo/bar/a:0
True
a:0
a:0
True
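Besides passing reuse=True when opening a scope, the scope object returned by tf.variable_scope can be switched into reuse mode with its reuse_variables() method. The sketch below (hypothetical dense_layer helper, TF 1.x) shows the typical weight-sharing pattern this enables:

# -*- coding: utf-8 -*-
# Sketch of the typical weight-sharing pattern with tf.variable_scope (TF 1.x).
# The helper name and shapes below are made up for illustration.
import tensorflow as tf

def dense_layer(x):
    # Inside a reusing scope this fetches the existing "w"/"b"; otherwise it creates them.
    w = tf.get_variable("w", [3, 2], initializer=tf.truncated_normal_initializer())
    b = tf.get_variable("b", [2], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

x1 = tf.placeholder(tf.float32, [None, 3])
x2 = tf.placeholder(tf.float32, [None, 3])

with tf.variable_scope("layer") as scope:
    y1 = dense_layer(x1)          # creates layer/w and layer/b
    scope.reuse_variables()       # switch this scope to reuse mode
    y2 = dense_layer(x2)          # reuses the same layer/w and layer/b

print(tf.trainable_variables())   # only layer/w:0 and layer/b:0

Both outputs share the same underlying weights, which is the main motivation for the tf.get_variable / tf.variable_scope mechanism.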





Appendix 1: variables.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.## Licensed under the Apache License, Version 2.0 (the "License");# you may not use this file except in compliance with the License.# You may obtain a copy of the License at##     http://www.apache.org/licenses/LICENSE-2.0## Unless required by applicable law or agreed to in writing, software# distributed under the License is distributed on an "AS IS" BASIS,# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.# See the License for the specific language governing permissions and# limitations under the License.# =============================================================================="""Variable class."""from __future__ import absolute_importfrom __future__ import divisionfrom __future__ import print_functionfrom tensorflow.core.framework import attr_value_pb2from tensorflow.core.framework import variable_pb2from tensorflow.python.framework import dtypesfrom tensorflow.python.framework import opsfrom tensorflow.python.framework import tensor_shapefrom tensorflow.python.ops import array_opsfrom tensorflow.python.ops import control_flow_opsfrom tensorflow.python.ops import math_opsfrom tensorflow.python.ops import state_opsfrom tensorflow.python.util import compatfrom tensorflow.python.util.deprecation import deprecatedclass Variable(object):  """See the @{$variables$Variables How To} for a high  level overview.  A variable maintains state in the graph across calls to `run()`. You add a  variable to the graph by constructing an instance of the class `Variable`.  The `Variable()` constructor requires an initial value for the variable,  which can be a `Tensor` of any type and shape. The initial value defines the  type and shape of the variable. After construction, the type and shape of  the variable are fixed. The value can be changed using one of the assign  methods.  If you want to change the shape of a variable later you have to use an  `assign` Op with `validate_shape=False`.  Just like any `Tensor`, variables created with `Variable()` can be used as  inputs for other Ops in the graph. Additionally, all the operators  overloaded for the `Tensor` class are carried over to variables, so you can  also add nodes to the graph by just doing arithmetic on variables.  ```python  import tensorflow as tf  # Create a variable.  w = tf.Variable(<initial-value>, name=<optional-name>)  # Use the variable in the graph like any Tensor.  y = tf.matmul(w, ...another variable or tensor...)  # The overloaded operators are available too.  z = tf.sigmoid(w + y)  # Assign a new value to the variable with `assign()` or a related method.  w.assign(w + 1.0)  w.assign_add(1.0)  ```  When you launch the graph, variables have to be explicitly initialized before  you can run Ops that use their value. You can initialize a variable by  running its *initializer op*, restoring the variable from a save file, or  simply running an `assign` Op that assigns a value to the variable. In fact,  the variable *initializer op* is just an `assign` Op that assigns the  variable's initial value to the variable itself.  ```python  # Launch the graph in a session.  with tf.Session() as sess:      # Run the variable initializer.      sess.run(w.initializer)      # ...you now can run ops that use the value of 'w'...  ```  The most common initialization pattern is to use the convenience function  `global_variables_initializer()` to add an Op to the graph that initializes  all the variables. You then run that Op after launching the graph.  
```python  # Add an Op to initialize global variables.  init_op = tf.global_variables_initializer()  # Launch the graph in a session.  with tf.Session() as sess:      # Run the Op that initializes global variables.      sess.run(init_op)      # ...you can now run any Op that uses variable values...  ```  If you need to create a variable with an initial value dependent on another  variable, use the other variable's `initialized_value()`. This ensures that  variables are initialized in the right order.  All variables are automatically collected in the graph where they are  created. By default, the constructor adds the new variable to the graph  collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function  `global_variables()` returns the contents of that collection.  When building a machine learning model it is often convenient to distinguish  between variables holding the trainable model parameters and other variables  such as a `global step` variable used to count training steps. To make this  easier, the variable constructor supports a `trainable=<bool>` parameter. If  `True`, the new variable is also added to the graph collection  `GraphKeys.TRAINABLE_VARIABLES`. The convenience function  `trainable_variables()` returns the contents of this collection. The  various `Optimizer` classes use this collection as the default list of  variables to optimize.  """  def __init__(self,               initial_value=None,               trainable=True,               collections=None,               validate_shape=True,               caching_device=None,               name=None,               variable_def=None,               dtype=None,               expected_shape=None,               import_scope=None):    """Creates a new variable with value `initial_value`.    The new variable is added to the graph collections listed in `collections`,    which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.    If `trainable` is `True` the variable is also added to the graph collection    `GraphKeys.TRAINABLE_VARIABLES`.    This constructor creates both a `variable` Op and an `assign` Op to set the    variable to its initial value.    Args:      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,        which is the initial value for the Variable. The initial value must have        a shape specified unless `validate_shape` is set to False. Can also be a        callable with no argument that returns the initial value when called. In        that case, `dtype` must be specified. (Note that initializer functions        from init_ops.py must first be bound to a shape before being used here.)      trainable: If `True`, the default, also adds the variable to the graph        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as        the default list of variables to use by the `Optimizer` classes.      collections: List of graph collections keys. The new variable is added to        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.      validate_shape: If `False`, allows the variable to be initialized with a        value of unknown shape. If `True`, the default, the shape of        `initial_value` must be known.      caching_device: Optional device string describing where the Variable        should be cached for reading.  Defaults to the Variable's device.        If not `None`, caches on another device.  Typical use is to cache        on the device where the Ops using the Variable reside, to deduplicate        copying through `Switch` and other conditional statements.      
name: Optional name for the variable. Defaults to `'Variable'` and gets        uniquified automatically.      variable_def: `VariableDef` protocol buffer. If not `None`, recreates        the Variable object with its contents. `variable_def` and the other        arguments are mutually exclusive.      dtype: If set, initial_value will be converted to the given type.        If `None`, either the datatype will be kept (if `initial_value` is        a Tensor), or `convert_to_tensor` will decide.      expected_shape: A TensorShape. If set, initial_value is expected        to have this shape.      import_scope: Optional `string`. Name scope to add to the        `Variable.` Only used when initializing from protocol buffer.    Raises:      ValueError: If both `variable_def` and initial_value are specified.      ValueError: If the initial value is not specified, or does not have a        shape and `validate_shape` is `True`.    """    if variable_def:      # If variable_def is provided, recreates the variable from its fields.      if initial_value:        raise ValueError("variable_def and initial_value are mutually "                         "exclusive.")      self._init_from_proto(variable_def, import_scope=import_scope)    else:      # Create from initial_value.      self._init_from_args(          initial_value=initial_value,          trainable=trainable,          collections=collections,          validate_shape=validate_shape,          caching_device=caching_device,          name=name,          dtype=dtype,          expected_shape=expected_shape)  def __repr__(self):    return "<tf.Variable '%s' shape=%s dtype=%s>" % (            self.name, self.get_shape(), self.dtype.name)  def _init_from_args(self,                      initial_value=None,                      trainable=True,                      collections=None,                      validate_shape=True,                      caching_device=None,                      name=None,                      dtype=None,                      expected_shape=None):    """Creates a new variable from arguments.    Args:      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,        which is the initial value for the Variable. The initial value must have        a shape specified unless `validate_shape` is set to False. Can also be a        callable with no argument that returns the initial value when called.        (Note that initializer functions  from init_ops.py must first be bound         to a shape before being used here.)      trainable: If `True`, the default, also adds the variable to the graph        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as        the default list of variables to use by the `Optimizer` classes.      collections: List of graph collections keys. The new variable is added to        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.      validate_shape: If `False`, allows the variable to be initialized with a        value of unknown shape. If `True`, the default, the shape of        `initial_value` must be known.      caching_device: Optional device string or function describing where the        Variable should be cached for reading.  Defaults to the Variable's        device.  If not `None`, caches on another device.  Typical use is to        cache on the device where the Ops using the Variable reside, to        deduplicate copying through `Switch` and other conditional statements.      name: Optional name for the variable. 
Defaults to `'Variable'` and gets        uniquified automatically.      dtype: If set, initial_value will be converted to the given type.        If None, either the datatype will be kept (if initial_value is       a Tensor) or float32 will be used (if it is a Python object convertible       to a Tensor).      expected_shape: Deprecated. Ignored.    Raises:      ValueError: If the initial value is not specified, or does not have a        shape and `validate_shape` is `True`.    """    _ = expected_shape    if initial_value is None:      raise ValueError("initial_value must be specified.")    init_from_fn = callable(initial_value)    if collections is None:      collections = [ops.GraphKeys.GLOBAL_VARIABLES]    if not isinstance(collections, (list, tuple, set)):      raise ValueError(          "collections argument to Variable constructor must be a list, tuple, "          "or set. Got %s of type %s" % (collections, type(collections)))    if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]    with ops.control_dependencies(None):      with ops.name_scope(name, "Variable", [] if init_from_fn else                          [initial_value]) as name:        if init_from_fn:          # Use attr_scope and device(None) to simulate the behavior of          # colocate_with when the variable we want to colocate with doesn't          # yet exist.          true_name = ops._name_from_scope_name(name)          attr = attr_value_pb2.AttrValue(              list=attr_value_pb2.AttrValue.ListValue(                  s=[compat.as_bytes("loc:@%s" % true_name)]))          # pylint: disable=protected-access          with ops.get_default_graph()._attr_scope({"_class": attr}):            with ops.name_scope("Initializer"),  ops.device(None):              self._initial_value = ops.convert_to_tensor(                  initial_value(), name="initial_value", dtype=dtype)              shape = (self._initial_value.get_shape()                       if validate_shape else tensor_shape.unknown_shape())            self._variable = state_ops.variable_op_v2(                shape,                self._initial_value.dtype.base_dtype,                name=name)        # Or get the initial value from a Tensor or Python object.        else:          self._initial_value = ops.convert_to_tensor(              initial_value, name="initial_value", dtype=dtype)          shape = (self._initial_value.get_shape()                   if validate_shape else tensor_shape.unknown_shape())          # In this case, the variable op can't be created until after the          # initial_value has been converted to a Tensor with a known type.          self._variable = state_ops.variable_op_v2(              shape,              self._initial_value.dtype.base_dtype,              name=name)        # Manually overrides the variable's shape with the initial value's.        if validate_shape:          initial_value_shape = self._initial_value.get_shape()          if not initial_value_shape.is_fully_defined():            raise ValueError("initial_value must have a shape specified: %s" %                             self._initial_value)        # Assigns initial value.        
self._initializer_op = state_ops.assign(            self._variable, self._initial_value,            validate_shape=validate_shape).op        # TODO(vrv): Change this class to not take caching_device, but        # to take the op to colocate the snapshot with, so we can use        # colocation rather than devices.        if caching_device is not None:          with ops.device(caching_device):            self._snapshot = array_ops.identity(self._variable, name="read")        else:          with ops.colocate_with(self._variable.op):            self._snapshot = array_ops.identity(self._variable, name="read")    ops.add_to_collections(collections, self)    self._caching_device = caching_device    self._save_slice_info = None  def _init_from_proto(self, variable_def, import_scope=None):    """Creates a new variable from `VariableDef` protocol buffer.    Args:      variable_def: `VariableDef` protocol buffer.      import_scope: Optional `string`. Name scope to add.    """    assert isinstance(variable_def, variable_pb2.VariableDef)    # Create from variable_def.    g = ops.get_default_graph()    self._variable = g.as_graph_element(        ops.prepend_name_scope(variable_def.variable_name,                               import_scope=import_scope))    self._initializer_op = g.as_graph_element(        ops.prepend_name_scope(variable_def.initializer_name,                               import_scope=import_scope))    self._snapshot = g.as_graph_element(        ops.prepend_name_scope(variable_def.snapshot_name,                               import_scope=import_scope))    if variable_def.HasField("save_slice_info_def"):      self._save_slice_info = Variable.SaveSliceInfo(          save_slice_info_def=variable_def.save_slice_info_def)    else:      self._save_slice_info = None    self._caching_device = None  def _as_graph_element(self):    """Conversion function for Graph.as_graph_element()."""    return self._variable  def _AsTensor(self):  # pylint: disable=invalid-name    """Converts this variable to a Tensor.    See @{tf.Variable.value}.    Returns:      A `Tensor` containing the value of the variable.    """    return self._snapshot  def __iter__(self):    """Dummy method to prevent iteration. Do not call.    NOTE(mrry): If we register __getitem__ as an overloaded operator,    Python will valiantly attempt to iterate over the variable's Tensor from 0    to infinity.  Declaring this method prevents this unintended behavior.    Raises:      TypeError: when invoked.    """    raise TypeError("'Variable' object is not iterable.")  def value(self):    """Returns the last snapshot of this variable.    You usually do not need to call this method as all ops that need the value    of the variable call it automatically through a `convert_to_tensor()` call.    Returns a `Tensor` which holds the value of the variable.  You can not    assign a new value to this tensor as it is not a reference to the variable.    To avoid copies, if the consumer of the returned value is on the same device    as the variable, this actually returns the live value of the variable, not    a copy.  Updates to the variable are seen by the consumer.  If the consumer    is on a different device it will get a copy of the variable.    Returns:      A `Tensor` containing the value of the variable.    """    return self._snapshot  def read_value(self):    """Returns the value of this variable, read in the current context.    Can be different from value() if it's on another device, with control    dependencies, etc.    
Returns:      A `Tensor` containing the value of the variable.    """    return array_ops.identity(self._variable, name="read")  def _ref(self):    """Returns a reference to this variable.    You usually do not need to call this method as all ops that need a reference    to the variable call it automatically.    Returns is a `Tensor` which holds a reference to the variable.  You can    assign a new value to the variable by passing the tensor to an assign op.    See @{tf.Variable.value} if you want to get the value of the    variable.    Returns:      A `Tensor` that is a reference to the variable.    """    return self._variable  def set_shape(self, shape):    """Overrides the shape for this variable.    Args:      shape: the `TensorShape` representing the overridden shape.    """    self._ref().set_shape(shape)    self.value().set_shape(shape)  def eval(self, session=None):    """In a session, computes and returns the value of this variable.    This is not a graph construction method, it does not add ops to the graph.    This convenience method requires a session where the graph    containing this variable has been launched. If no session is    passed, the default session is used.  See @{tf.Session} for more    information on launching a graph and on sessions.    ```python    v = tf.Variable([1, 2])    init = tf.global_variables_initializer()    with tf.Session() as sess:        sess.run(init)        # Usage passing the session explicitly.        print(v.eval(sess))        # Usage with the default session.  The 'with' block        # above makes 'sess' the default session.        print(v.eval())    ```    Args:      session: The session to use to evaluate this variable. If        none, the default session is used.    Returns:      A numpy `ndarray` with a copy of the value of this variable.    """    return self._variable.eval(session=session)  def initialized_value(self):    """Returns the value of the initialized variable.    You should use this instead of the variable itself to initialize another    variable with a value that depends on the value of this variable.    ```python    # Initialize 'v' with a random tensor.    v = tf.Variable(tf.truncated_normal([10, 40]))    # Use `initialized_value` to guarantee that `v` has been    # initialized before its value is used to initialize `w`.    # The random values are picked only once.    w = tf.Variable(v.initialized_value() * 2.0)    ```    Returns:      A `Tensor` holding the value of this variable after its initializer      has run.    """    with ops.control_dependencies(None):      return control_flow_ops.cond(is_variable_initialized(self),                                   self.read_value,                                   lambda: self.initial_value)  @property  def initial_value(self):    """Returns the Tensor used as the initial value for the variable.    Note that this is different from `initialized_value()` which runs    the op that initializes the variable before returning its value.    This method returns the tensor that is used by the op that initializes    the variable.    Returns:      A `Tensor`.    """    return self._initial_value  def assign(self, value, use_locking=False):    """Assigns a new value to the variable.    This is essentially a shortcut for `assign(self, value)`.    Args:      value: A `Tensor`. The new value for this variable.      use_locking: If `True`, use locking during the assignment.    Returns:      A `Tensor` that will hold the new value of this variable after      the assignment has completed.    
"""    return state_ops.assign(self._variable, value, use_locking=use_locking)  def assign_add(self, delta, use_locking=False):    """Adds a value to this variable.     This is essentially a shortcut for `assign_add(self, delta)`.    Args:      delta: A `Tensor`. The value to add to this variable.      use_locking: If `True`, use locking during the operation.    Returns:      A `Tensor` that will hold the new value of this variable after      the addition has completed.    """    return state_ops.assign_add(self._variable, delta, use_locking=use_locking)  def assign_sub(self, delta, use_locking=False):    """Subtracts a value from this variable.    This is essentially a shortcut for `assign_sub(self, delta)`.    Args:      delta: A `Tensor`. The value to subtract from this variable.      use_locking: If `True`, use locking during the operation.    Returns:      A `Tensor` that will hold the new value of this variable after      the subtraction has completed.    """    return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)  def scatter_sub(self, sparse_delta, use_locking=False):    """Subtracts `IndexedSlices` from this variable.    This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,    sparse_delta.values)`.    Args:      sparse_delta: `IndexedSlices` to be subtracted from this variable.      use_locking: If `True`, use locking during the operation.    Returns:      A `Tensor` that will hold the new value of this variable after      the scattered subtraction has completed.    Raises:      ValueError: if `sparse_delta` is not an `IndexedSlices`.    """    if not isinstance(sparse_delta, ops.IndexedSlices):      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)    return state_ops.scatter_sub(        self._variable,        sparse_delta.indices,        sparse_delta.values,        use_locking=use_locking)  def count_up_to(self, limit):    """Increments this variable until it reaches `limit`.    When that Op is run it tries to increment the variable by `1`. If    incrementing the variable would bring it above `limit` then the Op raises    the exception `OutOfRangeError`.    If no error is raised, the Op outputs the value of the variable before    the increment.    This is essentially a shortcut for `count_up_to(self, limit)`.    Args:      limit: value at which incrementing the variable raises an error.    Returns:      A `Tensor` that will hold the variable value before the increment. If no      other Op modifies this variable, the values produced will all be      distinct.    """    return state_ops.count_up_to(self._variable, limit=limit)  def load(self, value, session=None):    """Load new value into this variable    Writes new value to variable's memory. Doesn't add ops to the graph.    This convenience method requires a session where the graph    containing this variable has been launched. If no session is    passed, the default session is used.  See @{tf.Session} for more    information on launching a graph and on sessions.    ```python    v = tf.Variable([1, 2])    init = tf.global_variables_initializer()    with tf.Session() as sess:        sess.run(init)        # Usage passing the session explicitly.        v.load([2, 3], sess)        print(v.eval(sess)) # prints [2 3]        # Usage with the default session.  The 'with' block        # above makes 'sess' the default session.        
v.load([3, 4], sess)        print(v.eval()) # prints [3 4]    ```    Args:        value: New variable value        session: The session to use to evaluate this variable. If          none, the default session is used.    Raises:        ValueError: Session is not passed and no default session    """    session = session or ops.get_default_session()    if session is None:      raise ValueError(          "Either session argument should be provided or default session "          "should be established")    session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})  # Conversion to tensor.  @staticmethod  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):  # pylint: disable=invalid-name    """Utility function for converting a Variable to a Tensor."""    _ = name    if dtype and not dtype.is_compatible_with(v.dtype):      raise ValueError(          "Incompatible type conversion requested to type '%s' for variable "          "of type '%s'" % (dtype.name, v.dtype.name))    if as_ref:      return v._ref()  # pylint: disable=protected-access    else:      return v.value()  @staticmethod  def _OverloadAllOperators():  # pylint: disable=invalid-name    """Register overloads for all operators."""    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:      Variable._OverloadOperator(operator)    # For slicing, bind getitem differently than a tensor (use SliceHelperVar    # instead)    # pylint: disable=protected-access    setattr(Variable, "__getitem__", array_ops._SliceHelperVar)  @staticmethod  def _OverloadOperator(operator):  # pylint: disable=invalid-name    """Defer an operator overload to `ops.Tensor`.    We pull the operator out of ops.Tensor dynamically to avoid ordering issues.    Args:      operator: string. The operator name.    """    def _run_op(a, *args):      # pylint: disable=protected-access      return getattr(ops.Tensor, operator)(a._AsTensor(), *args)    # Propagate __doc__ to wrapper    try:      _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__    except AttributeError:      pass    setattr(Variable, operator, _run_op)  # NOTE(mrry): This enables the Variable's overloaded "right" binary  # operators to run when the left operand is an ndarray, because it  # accords the Variable class higher priority than an ndarray, or a  # numpy matrix.  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__  # mechanism, which allows more control over how Variables interact  # with ndarrays.  __array_priority__ = 100  @property  def name(self):    """The name of this variable."""    return self._variable.name  @property  def initializer(self):    """The initializer operation for this variable."""    return self._initializer_op  @property  def device(self):    """The device of this variable."""    return self._variable.device  @property  def dtype(self):    """The `DType` of this variable."""    return self._variable.dtype  @property  def op(self):    """The `Operation` of this variable."""    return self._variable.op  @property  def graph(self):    """The `Graph` of this variable."""    return self._variable.graph  @property  def shape(self):    """The `TensorShape` of this variable.    Returns:      A `TensorShape`.    """    return self._variable.get_shape()  def get_shape(self):    """Alias of Variable.shape."""    return self.shape  def to_proto(self, export_scope=None):    """Converts a `Variable` to a `VariableDef` protocol buffer.    Args:      export_scope: Optional `string`. Name scope to remove.    
Returns:      A `VariableDef` protocol buffer, or `None` if the `Variable` is not      in the specified name scope.    """    if (export_scope is None or        self._variable.name.startswith(export_scope)):      var_def = variable_pb2.VariableDef()      var_def.variable_name = ops.strip_name_scope(          self._variable.name, export_scope)      var_def.initializer_name = ops.strip_name_scope(          self.initializer.name, export_scope)      var_def.snapshot_name = ops.strip_name_scope(          self._snapshot.name, export_scope)      if self._save_slice_info:        var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(            export_scope=export_scope))      return var_def    else:      return None  @staticmethod  def from_proto(variable_def, import_scope=None):    """Returns a `Variable` object created from `variable_def`."""    return Variable(variable_def=variable_def,                    import_scope=import_scope)  class SaveSliceInfo(object):    """Information on how to save this Variable as a slice.    Provides internal support for saving variables as slices of a larger    variable.  This API is not public and is subject to change.    Available properties:    * full_name    * full_shape    * var_offset    * var_shape    """    def __init__(self,                 full_name=None,                 full_shape=None,                 var_offset=None,                 var_shape=None,                 save_slice_info_def=None,                 import_scope=None):      """Create a `SaveSliceInfo`.      Args:        full_name: Name of the full variable of which this `Variable` is a            slice.        full_shape: Shape of the full variable, as a list of int.        var_offset: Offset of this `Variable` into the full variable, as a            list of int.        var_shape: Shape of this `Variable`, as a list of int.        save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,          recreates the SaveSliceInfo object its contents.          `save_slice_info_def` and other arguments are mutually          exclusive.        import_scope: Optional `string`. Name scope to add. Only used          when initializing from protocol buffer.      """      if save_slice_info_def:        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)        self.full_name = ops.prepend_name_scope(            save_slice_info_def.full_name, import_scope=import_scope)        self.full_shape = [i for i in save_slice_info_def.full_shape]        self.var_offset = [i for i in save_slice_info_def.var_offset]        self.var_shape = [i for i in save_slice_info_def.var_shape]      else:        self.full_name = full_name        self.full_shape = full_shape        self.var_offset = var_offset        self.var_shape = var_shape    @property    def spec(self):      """Computes the spec string used for saving."""      full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "      sl_spec = ":".join([          "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)      ])      return full_shape_str + sl_spec    def to_proto(self, export_scope=None):      """Returns a SaveSliceInfoDef() proto.      Args:        export_scope: Optional `string`. Name scope to remove.      Returns:        A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not        in the specified name scope.      
"""      if (export_scope is None or          self.full_name.startswith(export_scope)):        save_slice_info_def = variable_pb2.SaveSliceInfoDef()        save_slice_info_def.full_name = ops.strip_name_scope(            self.full_name, export_scope)        for i in self.full_shape:          save_slice_info_def.full_shape.append(i)        for i in self.var_offset:          save_slice_info_def.var_offset.append(i)        for i in self.var_shape:          save_slice_info_def.var_shape.append(i)        return save_slice_info_def      else:        return None  def _set_save_slice_info(self, save_slice_info):    """Sets the slice info for this `Variable`.    Args:      save_slice_info: A `Variable.SaveSliceInfo` object.    """    self._save_slice_info = save_slice_info  def _get_save_slice_info(self):    return self._save_slice_infoclass PartitionedVariable(object):  """A container for partitioned `Variable` objects."""  class PartitionedVariableIterator(object):    """An iterator that allows accessing the underlying `Variable` objects.    This iterator is necessary to control order of access when Variables    are not partitioned in a standard way along a single axis.    Allows e.g. `list(partitioned_variable)` to return a proper list.    """    def __init__(self, partitioned_variable):      self._ix = 0      self._partitioned_variable = partitioned_variable    def __iter__(self):      return self    def __next__(self):  # For python3 compatibility.      return self.next()    def next(self):      # pylint: disable=protected-access      if self._ix >= len(self._partitioned_variable._variable_list):        raise StopIteration()      variable = self._partitioned_variable._variable_list[self._ix]      # pylint: enable=protected-access      self._ix += 1      return variable  def __init__(self, name, shape, dtype, variable_list, partitions):    """Creates a new partitioned variable wrapper.    Variables passed via the variable_list must contain a save_slice_info    field.  Concatenation and iteration is in lexicographic order according    to the var_offset property of the save_slice_info.    Args:      name: String. Overall name of the variables.      shape: List of integers.  Overall shape of the variables.      dtype: Type of the variables.      variable_list: List of `Variable` that comprise this partitioned variable.      partitions: List of integers.  Number of partitions for each dimension.    Raises:      TypeError: If `variable_list` is not a list of `Variable` objects, or        `partitions` is not a list.      ValueError: If `variable_list` is empty, or the `Variable` shape        information does not match `shape`, or `partitions` has invalid values.    """    if not isinstance(variable_list, (list, tuple)):      raise TypeError(          "variable_list is not a list or tuple: %s" % variable_list)    if not isinstance(partitions, (list, tuple)):      raise TypeError("partitions is not a list or tuple: %s" % partitions)    if not all([p >= 1 for p in partitions]):      raise ValueError("partition values must be positive: %s" % partitions)    if not variable_list:      raise ValueError("variable_list may not be empty")    # pylint: disable=protected-access    for v in variable_list:      # Sort the variable_list lexicographically according to var offset value.      
if not all([v._get_save_slice_info() is not None for v in variable_list]):        raise ValueError(            "All variables must have a save_slice_info available: %s"            % [v.name for v in variable_list])      if len(shape) != len(partitions):        raise ValueError("len(shape) != len(partitions): %s vs. %s"                         % (shape, partitions))      if not all([v._get_save_slice_info().full_shape == shape]):        raise ValueError(            "All variables' full shapes must match shape: %s; "            "but full shapes were: %s"            % (shape, str([v._get_save_slice_info().full_shape])))    self._variable_list = sorted(        variable_list, key=lambda v: v._get_save_slice_info().var_offset)    # pylint: enable=protected-access    self._name = name    self._shape = shape    self._dtype = dtype    self._partitions = partitions    self._as_tensor = None  def __iter__(self):    """Return an iterable for accessing the underlying partition Variables."""    return self.PartitionedVariableIterator(self)  def __len__(self):    num_partition_axes = len(self._partition_axes())    if num_partition_axes > 1:      raise ValueError("Cannot get a length for %d > 1 partition axes"                       % num_partition_axes)    return len(self._variable_list)  def _partition_axes(self):    if all([p == 1 for p in self._partitions]):      return [0]    else:      return [i for i, p in enumerate(self._partitions) if p > 1]  def _concat(self):    """Returns the overall concatenated value as a `Tensor`.    This is different from using the partitioned variable directly as a tensor    (through tensor conversion and `as_tensor`) in that it creates a new set of    operations that keeps the control dependencies from its scope.    Returns:      `Tensor` containing the concatenated value.    """    if len(self._variable_list) == 1:      with ops.name_scope(None):        return array_ops.identity(self._variable_list[0], name=self._name)    partition_axes = self._partition_axes()    if len(partition_axes) > 1:      raise NotImplementedError(          "Cannot concatenate along more than one dimension: %s.  "          "Multi-axis partition concat is not supported" % str(partition_axes))    partition_ix = partition_axes[0]    with ops.name_scope(self._name + "/ConcatPartitions/"):      concatenated = array_ops.concat(self._variable_list, partition_ix)    with ops.name_scope(None):      return array_ops.identity(concatenated, name=self._name)  def as_tensor(self):    """Returns the overall concatenated value as a `Tensor`.    The returned tensor will not inherit the control dependencies from the scope    where the value is used, which is similar to getting the value of    `Variable`.    Returns:      `Tensor` containing the concatenated value.    
"""    with ops.control_dependencies(None):      return self._concat()  @staticmethod  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):    # pylint: disable=invalid-name    _ = name    if dtype is not None and not dtype.is_compatible_with(v.dtype):      raise ValueError(          "Incompatible type conversion requested to type '%s' for variable "          "of type '%s'" % (dtype.name, v.dtype.name))    if as_ref:      raise NotImplementedError(          "PartitionedVariable doesn't support being used as a reference.")    else:      return v.as_tensor()  @property  def name(self):    return self._name  @property  def dtype(self):    return self._dtype  def get_shape(self):    return self._shape  def _get_variable_list(self):    return self._variable_list  def _get_partitions(self):    return self._partitions  def assign(self, value, use_locking=False):    _ = value, use_locking    raise NotImplementedError(        "assign() has not been implemented for PartitionedVariable.")def global_variables():  """Returns global variables.  Global variables are variables that are shared across machines in a  distributed environment. The `Variable()` constructor or `get_variable()`  automatically adds new variables to the graph collection  `GraphKeys.GLOBAL_VARIABLES`.  This convenience function returns the contents of that collection.  An alternative to global variables are local variables. See  @{tf.local_variables}  Returns:    A list of `Variable` objects.  """  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)@deprecated("2017-03-02", "Please use tf.global_variables instead.")def all_variables():  """See `tf.global_variables`."""  return global_variables()def _all_saveable_objects():  """Returns all variables and `SaveableObject`s that must be checkpointed.  Returns:    A list of `Variable` and `SaveableObject` to be checkpointed  """  # TODO(andreasst): make this function public once things are settled.  return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +          ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))def local_variables():  """Returns local variables.  Local variables - per process variables, usually not saved/restored to  checkpoint and used for temporary or intermediate values.  For example, they can be used as counters for metrics computation or  number of epochs this machine has read data.  The `tf.contrib.framework.local_variable()` function automatically adds the  new variable to `GraphKeys.LOCAL_VARIABLES`.  This convenience function returns the contents of that collection.  An alternative to local variables are global variables. See  @{tf.global_variables}  Returns:    A list of local `Variable` objects.  """  return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)def model_variables():  """Returns all variables in the MODEL_VARIABLES collection.  Returns:    A list of local Variable objects.  """  return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)def trainable_variables():  """Returns all variables created with `trainable=True`.  When passed `trainable=True`, the `Variable()` constructor automatically  adds new variables to the graph collection  `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the  contents of that collection.  Returns:    A list of Variable objects.  """  return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)def moving_average_variables():  """Returns all variables that maintain their moving averages.  
If an `ExponentialMovingAverage` object is created and the `apply()`  method is called on a list of variables, these variables will  be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.  This convenience function returns the contents of that collection.  Returns:    A list of Variable objects.  """  return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES)def variables_initializer(var_list, name="init"):  """Returns an Op that initializes a list of variables.  After you launch the graph in a session, you can run the returned Op to  initialize all the variables in `var_list`. This Op runs all the  initializers of the variables in `var_list` in parallel.  Calling `initialize_variables()` is equivalent to passing the list of  initializers to `Group()`.  If `var_list` is empty, however, the function still returns an Op that can  be run. That Op just has no effect.  Args:    var_list: List of `Variable` objects to initialize.    name: Optional name for the returned operation.  Returns:    An Op that run the initializers of all the specified variables.  """  if var_list:    return control_flow_ops.group(*[v.initializer for v in var_list], name=name)  return control_flow_ops.no_op(name=name)@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")def initialize_variables(var_list, name="init"):  """See `tf.variables_initializer`."""  return variables_initializer(var_list, name=name)def global_variables_initializer():  """Returns an Op that initializes global variables.  This is just a shortcut for `variable_initializers(global_variables())`  Returns:    An Op that initializes global variables in the graph.  """  return variables_initializer(global_variables())@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")def initialize_all_variables():  """See `tf.global_variables_initializer`."""  return global_variables_initializer()def local_variables_initializer():  """Returns an Op that initializes all local variables.  This is just a shortcut for `variable_initializers(local_variables())`  Returns:    An Op that initializes all local variables in the graph.  """  return variables_initializer(local_variables())@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")def initialize_local_variables():  """See `tf.local_variables_initializer`."""  return local_variables_initializer()def is_variable_initialized(variable):  """Tests if a variable has been initialized.  Args:    variable: A `Variable`.  Returns:    Returns a scalar boolean Tensor, `True` if the variable has been    initialized, `False` otherwise.  """  return state_ops.is_variable_initialized(variable)def assert_variables_initialized(var_list=None):  """Returns an Op to check if variables are initialized.  NOTE: This function is obsolete and will be removed in 6 months.  Please  change your implementation to use `report_uninitialized_variables()`.  When run, the returned Op will raise the exception `FailedPreconditionError`  if any of the variables has not yet been initialized.  Note: This function is implemented by trying to fetch the values of the  variables. If one of the variables is not initialized a message may be  logged by the C++ runtime. This is expected.  Args:    var_list: List of `Variable` objects to check. Defaults to the      value of `global_variables().`  Returns:    An Op, or None if there are no variables.  """  if var_list is None:    var_list = global_variables() + local_variables()  # Backwards compatibility for old-style variables. 
TODO(touts): remove.  if not var_list:    var_list = []    for op in ops.get_default_graph().get_operations():      if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:        var_list.append(op.outputs[0])  if not var_list:    return None  else:    ranks = []    for var in var_list:      with ops.colocate_with(var.op):        ranks.append(array_ops.rank_internal(var, optimize=False))    if len(ranks) == 1:      return ranks[0]    else:      return array_ops.stack(ranks)def report_uninitialized_variables(var_list=None,                                   name="report_uninitialized_variables"):  """Adds ops to list the names of uninitialized variables.  When run, it returns a 1-D tensor containing the names of uninitialized  variables if there are any, or an empty array if there are none.  Args:    var_list: List of `Variable` objects to check. Defaults to the      value of `global_variables() + local_variables()`    name: Optional name of the `Operation`.  Returns:    A 1-D tensor containing names of the uninitialized variables, or an empty    1-D tensor if there are no variables or no uninitialized variables.  """  if var_list is None:    var_list = global_variables() + local_variables()    # Backwards compatibility for old-style variables. TODO(touts): remove.    if not var_list:      var_list = []      for op in ops.get_default_graph().get_operations():        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:          var_list.append(op.outputs[0])  with ops.name_scope(name):    if not var_list:      # Return an empty tensor so we only need to check for returned tensor      # size being 0 as an indication of model ready.      return array_ops.constant([], dtype=dtypes.string)    else:      # Get a 1-D boolean tensor listing whether each variable is initialized.      variables_mask = math_ops.logical_not(          array_ops.stack(              [state_ops.is_variable_initialized(v) for v in var_list]))      # Get a 1-D string tensor containing all the variable names.      variable_names_tensor = array_ops.constant([s.op.name for s in var_list])      # Return a 1-D tensor containing all the names of uninitialized variables.      return array_ops.boolean_mask(variable_names_tensor, variables_mask)# pylint: disable=protected-accessops.register_tensor_conversion_function(Variable,                                        Variable._TensorConversionFunction)Variable._OverloadAllOperators()ops.register_tensor_conversion_function(    PartitionedVariable, PartitionedVariable._TensorConversionFunction)# pylint: enable=protected-accessops.register_dense_tensor_like_type(Variable)

Appendix 2: variable_scope.py

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.## Licensed under the Apache License, Version 2.0 (the "License");# you may not use this file except in compliance with the License.# You may obtain a copy of the License at##     http://www.apache.org/licenses/LICENSE-2.0## Unless required by applicable law or agreed to in writing, software# distributed under the License is distributed on an "AS IS" BASIS,# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.# See the License for the specific language governing permissions and# limitations under the License.# =============================================================================="""A class to store named variables and a scope operator to manage sharing."""from __future__ import absolute_importfrom __future__ import divisionfrom __future__ import print_functionimport collections as collections_libimport copyimport functoolsimport tracebackimport sixfrom six.moves import xrange  # pylint: disable=redefined-builtinfrom tensorflow.python.framework import dtypesfrom tensorflow.python.framework import opsfrom tensorflow.python.framework import tensor_shapefrom tensorflow.python.ops import array_opsfrom tensorflow.python.ops import init_opsfrom tensorflow.python.ops import resource_variable_opsfrom tensorflow.python.ops import variablesfrom tensorflow.python.platform import tf_logging as loggingfrom tensorflow.python.util import tf_contextlib__all__ = ["VariableScope", "get_variable_scope",           "get_variable", "get_local_variable", "variable_scope",           "variable_op_scope", "no_regularizer"]class _PartitionInfo(object):  """Holds partition info used by initializer functions.  """  def __init__(self, full_shape, var_offset):    """Constructor.    Args:      full_shape: Tuple or list of `int` indicating the full combined shape        of the partitioned variables.      var_offset: Tuple or list of `int` specifying offset of this partition        with respect to the full variable for each dimension.    Raises:      TypeError: If `full_shape` or `var_offset` is not a sequence.      ValueError: If `full_shape` or `var_offset` differ in length. If        `var_offset` exceeds `full_shape` in any dimension.    
"""    if not isinstance(full_shape, collections_lib.Sequence) or isinstance(        full_shape, six.string_types):      raise TypeError(          "`full_shape` must be a sequence (like tuple or list) instead of " +          type(full_shape).__name__)    if not isinstance(var_offset, collections_lib.Sequence) or isinstance(        var_offset, six.string_types):      raise TypeError(          "`var_offset` must be a sequence (like tuple or list) instead of " +          type(var_offset).__name__)    if len(var_offset) != len(full_shape):      raise ValueError(          "Expected equal length, but `var_offset` is of length {} while "          "full_shape is of length {}.".format(              len(var_offset), len(full_shape)))    for i in xrange(len(full_shape)):      offset = var_offset[i]      shape = full_shape[i]      if offset < 0 or offset >= shape:        raise ValueError(            "Expected 0 <= offset < shape but found offset={}, shape={} for "            "var_offset={}, full_shape={}".format(offset, shape, var_offset,                                                  full_shape))    self._full_shape = full_shape    self._var_offset = var_offset  @property  def full_shape(self):    return self._full_shape  @property  def var_offset(self):    return self._var_offset  def single_offset(self, shape):    """Returns the offset when the variable is partitioned in at most one dim.    Args:      shape: Tuple or list of `int` indicating the shape of one specific        variable partition.    Returns:      `int` representing the offset in the dimension along which the variable is       partitioned. Returns 0 if the variable is not being partitioned.    Raises:      ValueError: Depending on self.single_slice_dim().    """    single_slice_dim = self.single_slice_dim(shape)    # If this variable is not being partitioned at all, single_slice_dim() could    # return None.    if single_slice_dim is None:      return 0    return self.var_offset[single_slice_dim]  def single_slice_dim(self, shape):    """Returns the slice dim when the variable is partitioned only in one dim.    Args:      shape: Tuple or list of `int` indicating the shape of one specific        variable partition.    Returns:      `int` representing the dimension that the variable is partitioned in, or      `None` if the variable doesn't seem to be partitioned at all.    Raises:      TypeError: If `shape` is not a sequence.      ValueError: If `shape` is not the same length as `self.full_shape`. If        the variable is partitioned in more than one dimension.    
"""    if not isinstance(shape, collections_lib.Sequence) or isinstance(        shape, six.string_types):      raise TypeError(          "`shape` must be a sequence (like tuple or list) instead of " +          type(shape).__name__)    if len(shape) != len(self.full_shape):      raise ValueError(          "Expected equal length, but received shape={} of length {} while "          "self.full_shape={} is of length {}.".format(shape, len(              shape), self.full_shape, len(self.full_shape)))    for i in xrange(len(shape)):      if self.var_offset[i] + shape[i] > self.full_shape[i]:        raise ValueError(            "With self.var_offset={}, a partition of shape={} would exceed "            "self.full_shape={} in dimension {}.".format(                self.var_offset, shape, self.full_shape, i))    slice_dim = None    for i in xrange(len(shape)):      if shape[i] == self.full_shape[i]:        continue      if slice_dim is not None:        raise ValueError(            "Cannot use single_slice_dim() with shape={} and "            "self.full_shape={} since slice dim could be either dimension {} "            "or {}.".format(shape, self.full_shape, i, slice_dim))      slice_dim = i    return slice_dimclass _VariableStore(object):  """Variable store that carries a number of named Variables.  New variable names and new variables can be created; all stored  variables are initialized with the initializer passed to __init__.  Attributes:    vars: a dictionary with string names (same as passed in GetVar) as keys          and the corresponding TensorFlow Variables as values.  """  def __init__(self):    """Create a variable store."""    self._vars = {}  # A dictionary of the stored TensorFlow variables.    self._partitioned_vars = {}  # A dict of the stored PartitionedVariables.    self.variable_scopes_count = {}  # Count re-used variable scopes.  def open_variable_scope(self, scope_name):    if scope_name in self.variable_scopes_count:      self.variable_scopes_count[scope_name] += 1    else:      self.variable_scopes_count[scope_name] = 1  def close_variable_subscopes(self, scope_name):    for k in self.variable_scopes_count:      if not scope_name or k.startswith(scope_name + "/"):        self.variable_scopes_count[k] = 0  def variable_scope_count(self, scope_name):    return self.variable_scopes_count.get(scope_name, 0)  def get_variable(self, name, shape=None, dtype=dtypes.float32,                   initializer=None, regularizer=None, reuse=None,                   trainable=True, collections=None, caching_device=None,                   partitioner=None, validate_shape=True, use_resource=None,                   custom_getter=None):    """Gets an existing variable with these parameters or create a new one.    If a variable with the given name is already stored, we return the stored    variable. Otherwise, we create a new one.    Set `reuse` to `True` when you only want to reuse existing Variables.    Set `reuse` to `False` when you only want to create new Variables.    If `reuse` is `None` (the default), both new and existing variables are    returned.    If initializer is `None` (the default), the default initializer passed in    the constructor is used. If that one is `None` too, we use a new    `glorot_uniform_initializer`. If initializer is a Tensor, we use    it as a value and derive the shape from the initializer.    If a partitioner is provided, a `PartitionedVariable` is returned.    Accessing this object as a `Tensor` returns the shards concatenated along    the partition axis.    
Some useful partitioners are available.  See, e.g.,    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.    Args:      name: The name of the new or existing variable.      shape: Shape of the new or existing variable.      dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).      initializer: Initializer for the variable.      regularizer: A (Tensor -> Tensor or None) function; the result of        applying it on a newly created variable will be added to the collection        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.      reuse: a Boolean or `None`. Controls reuse or creation of variables.      trainable: If `True` also add the variable to the graph collection        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).      collections: List of graph collections keys to add the `Variable` to.        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).      caching_device: Optional device string or function describing where the        Variable should be cached for reading.  Defaults to the Variable's        device.  If not `None`, caches on another device.  Typical use is to        cache on the device where the Ops using the `Variable` reside, to        deduplicate copying through `Switch` and other conditional statements.      partitioner: Optional callable that accepts a fully defined `TensorShape`        and dtype of the `Variable` to be created, and returns a list of        partitions for each axis (currently only one axis can be partitioned).      validate_shape: If False, allows the variable to be initialized with a        value of unknown shape. If True, the default, the shape of initial_value        must be known.      use_resource: If False, creates a regular Variable. If True, creates        instead an experimental ResourceVariable which has well-defined        semantics. Defaults to False (will later change to True).      custom_getter: Callable that takes as a first argument the true getter,        and allows overwriting the internal get_variable method.        The signature of `custom_getter` should match that of this method,        but the most future-proof version will allow for changes:        `def custom_getter(getter, *args, **kwargs)`.  Direct access to        all `get_variable` parameters is also allowed:        `def custom_getter(getter, name, *args, **kwargs)`.  A simple identity        custom getter that simply creates variables with modified names is:        ```python        def custom_getter(getter, name, *args, **kwargs):          return getter(name + '_suffix', *args, **kwargs)        ```    Returns:      The created or existing `Variable` (or `PartitionedVariable`, if a      partitioner was used).    Raises:      ValueError: when creating a new variable and shape is not declared,        when reusing a variable and specifying a conflicting shape,        or when violating reuse during variable creation.    """    if custom_getter is not None and not callable(custom_getter):      raise ValueError(          "Passed a custom_getter which is not callable: %s" % custom_getter)    # If a *_ref type is passed in an error would be triggered further down the    # stack. We prevent this using base_dtype to get a non-ref version of the    # type, before doing anything else. When _ref types are removed in favour of    # resources, this line can be removed.    
try:      dtype = dtype.base_dtype    except AttributeError:      # .base_dtype not existing means that we will try and use the raw dtype      # which was passed in - this might be a NumPy type which is valid.      pass    # This is the main logic of get_variable.  However, custom_getter    # may override this logic.  So we save it as a callable and pass    # it to custom_getter.    # Note: the parameters of _true_getter, and their documentation, match    # *exactly* item-for-item with the docstring of this method.    def _true_getter(name, shape=None, dtype=dtypes.float32,  # pylint: disable=missing-docstring                     initializer=None, regularizer=None, reuse=None,                     trainable=True, collections=None, caching_device=None,                     partitioner=None, validate_shape=True, use_resource=None):      is_scalar = shape is not None and not shape      # Partitioned variable case      if partitioner is not None and not is_scalar:        if not callable(partitioner):          raise ValueError(              "Partitioner must be callable, but received: %s" % partitioner)        with ops.name_scope(None):          return self._get_partitioned_variable(name=name,                                                shape=shape,                                                dtype=dtype,                                                initializer=initializer,                                                regularizer=regularizer,                                                reuse=reuse,                                                trainable=trainable,                                                collections=collections,                                                caching_device=caching_device,                                                partitioner=partitioner,                                                validate_shape=validate_shape,                                                use_resource=use_resource)      # Special case for partitioned variable to allow reuse without having to      # specify partitioner.      if (reuse is True and partitioner is None          and name in self._partitioned_vars):        return self._get_partitioned_variable(name=name,                                              shape=shape,                                              dtype=dtype,                                              initializer=initializer,                                              regularizer=regularizer,                                              reuse=reuse,                                              trainable=trainable,                                              collections=collections,                                              caching_device=caching_device,                                              partitioner=None,                                              validate_shape=validate_shape,                                              use_resource=use_resource)      # Single variable case      if "%s/part_0" % name in self._vars:        raise ValueError(            "No partitioner was provided, but a partitioned version of the "            "variable was found: %s/part_0. Perhaps a variable of the same "            "name was already created with partitioning?" 
% name)      return self._get_single_variable(          name=name, shape=shape, dtype=dtype,          initializer=initializer, regularizer=regularizer, reuse=reuse,          trainable=trainable, collections=collections,          caching_device=caching_device, validate_shape=validate_shape,          use_resource=use_resource)    if custom_getter is not None:      return custom_getter(          getter=_true_getter, name=name, shape=shape, dtype=dtype,          initializer=initializer, regularizer=regularizer,          reuse=reuse, trainable=trainable, collections=collections,          caching_device=caching_device, partitioner=partitioner,          validate_shape=validate_shape, use_resource=use_resource)    else:      return _true_getter(          name, shape=shape, dtype=dtype,          initializer=initializer, regularizer=regularizer,          reuse=reuse, trainable=trainable, collections=collections,          caching_device=caching_device, partitioner=partitioner,          validate_shape=validate_shape, use_resource=use_resource)  def _get_partitioned_variable(      self, name, partitioner, shape=None, dtype=dtypes.float32,      initializer=None, regularizer=None, reuse=None,      trainable=True, collections=None, caching_device=None,      validate_shape=True, use_resource=None):    """Gets or creates a sharded variable list with these parameters.    The `partitioner` must be a callable that accepts a fully defined    `TensorShape` and returns a sequence of integers (the `partitions`).    These integers describe how to partition the given sharded `Variable`    along the given dimension.  That is, `partitions[1] = 3` means split    the `Variable` into 3 shards along dimension 1.  Currently, sharding along    only one axis is supported.    If the list of variables with the given name (prefix) is already stored,    we return the stored variables. Otherwise, we create a new one.    Set `reuse` to `True` when you only want to reuse existing Variables.    Set `reuse` to `False` when you only want to create new Variables.    If `reuse` is `None` (the default), both new and existing variables are    returned.    If initializer is `None` (the default), the default initializer passed in    the constructor is used. If that one is `None` too, we use a new    `glorot_uniform_initializer`. If initializer is a Tensor, we use    it as a value and derive the shape from the initializer.    If the initializer is a callable, then it will be called for each    shard.  Otherwise the initializer should match the shape of the entire    sharded Variable, and it will be sliced accordingly for each shard.    Some useful partitioners are available.  See, e.g.,    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.    Args:      name: the name of the new or existing sharded variable.      partitioner: Optional callable that accepts a fully defined `TensorShape`        and `dtype` of the Variable to be created, and returns a list of        partitions for each axis (currently only one axis can be partitioned).      shape: shape of the new or existing sharded variable.      dtype: type of the new or existing sharded variable        (defaults to `DT_FLOAT`).      initializer: initializer for the sharded variable.      regularizer: a (Tensor -> Tensor or None) function; the result of        applying it on a newly created variable will be added to the collection        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.      reuse: a Boolean or `None`. 
Controls reuse or creation of variables.      trainable: If `True` also add the variable to the graph collection        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).      collections: List of graph collections keys to add the Variable to.        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).      caching_device: Optional device string or function describing where the        Variable should be cached for reading.  Defaults to the Variable's        device.  If not `None`, caches on another device.  Typical use is to        cache on the device where the Ops using the Variable reside, to        deduplicate copying through `Switch` and other conditional statements.      validate_shape: If False, allows the variable to be initialized with a        value of unknown shape. If True, the default, the shape of initial_value        must be known.      use_resource: If False, creates a regular Variable. If True, creates an        experimental ResourceVariable which has well-defined semantics. Defaults        to False (will later change to True).    Returns:      A `PartitionedVariable` object.    Raises:      ValueError: when creating a new variable and shape is not declared,        when reusing a variable and specifying a conflicting shape,        when violating reuse during variable creation, or if an existing        sharded variable exists for the given name but with different sharding.    """    initializing_from_value = initializer is not None and isinstance(        initializer, ops.Tensor)    reuse_without_partition = reuse is True and partitioner is None    if name in self._vars:      raise ValueError(          "A partitioner was provided, but an unpartitioned version of the "          "variable was found: %s.  Perhaps a variable of the same name was "          "already created without partitioning?" % name)    shape = tensor_shape.as_shape(shape)    if initializing_from_value:      shape = shape.merge_with(initializer.get_shape())    if not reuse_without_partition:      if not shape.is_fully_defined():        raise ValueError("Shape of a new partitioned variable (%s) must be "                         "fully defined, but instead was %s." % (name, shape))      if shape.ndims < 1:        raise ValueError("A partitioned Variable must have rank at least 1, "                         "shape: %s" % shape)      partitions = partitioner(shape=shape, dtype=dtype)      if not isinstance(partitions, collections_lib.Sequence):        raise ValueError("Partitioner must return a sequence, but saw: %s"                         % partitions)      if len(partitions) != shape.ndims:        raise ValueError(            "Partitioner returned a partition list that does not match the "            "Variable's rank: %s vs. %s" % (partitions, shape))      if any([p < 1 for p in partitions]):        raise ValueError(            "Partitioner returned zero partitions for some axes: %s" %            partitions)    should_check = reuse is not None    if name in self._partitioned_vars:      if should_check and not reuse:        raise ValueError(            "Partitioned variable with name %s already exists. Did you mean to "            "set reuse=True in VarScope?"            % name)      existing_var = self._partitioned_vars[name]      if not shape.is_compatible_with(existing_var.get_shape()):        raise ValueError(            "Trying to reuse partitioned variable %s, but specified shape %s "            "and found shape %s."            
% (name, shape, existing_var.get_shape()))      if not dtype.is_compatible_with(existing_var.dtype):        raise ValueError(            "Trying to reuse partitioned variable %s, but specified dtype %s "            "and found dtype %s."            % (name, dtype.name, existing_var.dtype.name))      # pylint: disable=protected-access      if (not reuse_without_partition and          existing_var._get_partitions() != partitions):        raise ValueError(            "Trying to reuse partitioned variable %s, but specified partitions "            "%s and found partitions %s." %            (name, partitions, existing_var._get_partitions()))      # pylint: enable=protected-access      return existing_var    if should_check and reuse:      raise ValueError("PartitionedVariable %s does not exist, or was not "                       "created with tf.get_variable(). Did you mean to set "                       "reuse=None in VarScope?" % name)    slice_dim, slice_shape = _compute_slice_dim_and_shape(        shape.as_list(), partitions)    vs = []    num_slices = partitions[slice_dim]    num_slices_with_excess = shape[slice_dim].value % num_slices    slice_offset = [0] * shape.ndims    if "%s/part_0" % name in self._vars:      if "%s/part_%d" % (name, num_slices - 1) not in self._vars:        raise ValueError(            "Partitioner returned a different partitioning than what was "            "already found.  Partitioner returned %d shards, and shard "            "%s/part_0 was found, but %s/part_%d was not."            % (num_slices, name, name, num_slices - 1))      if "%s/part_%d" % (name, num_slices) in self._vars:        raise ValueError(            "Partitioner returned a different partitioning than what was "            "already found.  Partitioner returned %d shards, and shard "            "%s/part_0 was found, but so was the extra shard %s/part_%d."            % (num_slices, name, name, num_slices))    for i in xrange(num_slices):      var_shape = slice_shape[:]      var_offset = slice_offset[:]      partition_info = _PartitionInfo(          full_shape=shape.as_list(), var_offset=var_offset)      if i < num_slices_with_excess:        var_shape[slice_dim] += 1      slice_offset[slice_dim] += var_shape[slice_dim]      var_full_name = "%s/part_%d" % (name, i)      with ops.name_scope(var_full_name + "/PartitionedInitializer"):        # Create the tensor to initialize the variable with default value.        if initializer is None:          init, initializing_from_value = self._get_default_initializer(              name=name, shape=shape, dtype=dtype)          if initializing_from_value:            init_shape = None          else:            init_shape = var_shape        elif callable(initializer):          init = initializer          init_shape = var_shape        elif isinstance(initializer, ops.Tensor):          init = array_ops.slice(initializer, var_offset, var_shape)          # Use the dtype of the given tensor.          
dtype = init.dtype.base_dtype          init_shape = None        else:          init = ops.convert_to_tensor(initializer, dtype=dtype)          init = array_ops.slice(init, var_offset, var_shape)          init_shape = None      with ops.name_scope(None):        var = self._get_single_variable(            name=var_full_name,            shape=init_shape,            dtype=dtype,            initializer=init,            partition_info=partition_info,            regularizer=regularizer,            reuse=reuse,            trainable=trainable,            collections=collections,            caching_device=caching_device,            validate_shape=validate_shape,            use_resource=use_resource)      # pylint: disable=protected-access      var._set_save_slice_info(variables.Variable.SaveSliceInfo(          name, shape.as_list(), var_offset, var_shape))      vs.append(var)      # pylint: enable=protected-access      # pylint: disable=protected-access    partitioned_var = variables.PartitionedVariable(name=name,                                                    shape=shape,                                                    dtype=dtype,                                                    variable_list=vs,                                                    partitions=partitions)    # pylint: enable=protected-access    self._partitioned_vars[name] = partitioned_var    return partitioned_var  def _get_single_variable(self,                           name,                           shape=None,                           dtype=dtypes.float32,                           initializer=None,                           regularizer=None,                           partition_info=None,                           reuse=None,                           trainable=True,                           collections=None,                           caching_device=None,                           validate_shape=True,                           use_resource=None,):    """Get or create a single Variable (e.g. a shard or entire variable).    See the documentation of get_variable above (ignore partitioning components)    for details.    Args:      name: see get_variable.      shape: see get_variable.      dtype: see get_variable.      initializer: see get_variable.      regularizer: see get_variable.      partition_info: _PartitionInfo object.      reuse: see get_variable.      trainable: see get_variable.      collections: see get_variable.      caching_device: see get_variable.      validate_shape: see get_variable.      use_resource: see get_variable.    Returns:      A Variable.  See documentation of get_variable above.    Raises:      ValueError: See documentation of get_variable above.    """    # Set to true if initializer is a constant.    initializing_from_value = False    if initializer is not None and not callable(initializer):      initializing_from_value = True    if shape is not None and initializing_from_value:      raise ValueError("If initializer is a constant, do not specify shape.")    should_check = reuse is not None    dtype = dtypes.as_dtype(dtype)    shape = tensor_shape.as_shape(shape)    if name in self._vars:      # Here we handle the case when returning an existing variable.      if should_check and not reuse:        tb = self._vars[name].op.traceback[::-1]        # Throw away internal tf entries and only take a few lines.        tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]        raise ValueError("Variable %s already exists, disallowed."                         
" Did you mean to set reuse=True in VarScope? "                         "Originally defined at:\n\n%s" % (                             name, "".join(traceback.format_list(tb))))      found_var = self._vars[name]      if not shape.is_compatible_with(found_var.get_shape()):        raise ValueError("Trying to share variable %s, but specified shape %s"                         " and found shape %s." % (name, shape,                                                   found_var.get_shape()))      if not dtype.is_compatible_with(found_var.dtype):        dtype_str = dtype.name        found_type_str = found_var.dtype.name        raise ValueError("Trying to share variable %s, but specified dtype %s"                         " and found dtype %s." % (name, dtype_str,                                                   found_type_str))      return found_var    # The code below handles only the case of creating a new variable.    if should_check and reuse:      raise ValueError("Variable %s does not exist, or was not created with "                       "tf.get_variable(). Did you mean to set reuse=None in "                       "VarScope?" % name)    if not shape.is_fully_defined() and not initializing_from_value:      raise ValueError("Shape of a new variable (%s) must be fully defined, "                       "but instead was %s." % (name, shape))    # Create the tensor to initialize the variable with default value.    if initializer is None:      initializer, initializing_from_value = self._get_default_initializer(          name=name, shape=shape, dtype=dtype)    # Clear control dependencies while creating the initializer.    with ops.control_dependencies(None):      if initializing_from_value:        init_val = initializer        variable_dtype = None      else:        # Instantiate initializer if provided initializer is a type object.        if isinstance(initializer, type(init_ops.Initializer)):          initializer = initializer(dtype=dtype)        init_val = lambda: initializer(  # pylint: disable=g-long-lambda            shape.as_list(), dtype=dtype, partition_info=partition_info)        variable_dtype = dtype.base_dtype    # Create the variable.    if use_resource is None:      # Set the default value if unspecified.      use_resource = False    if use_resource:      v = resource_variable_ops.ResourceVariable(          initial_value=init_val,          name=name,          trainable=trainable,          collections=collections,          caching_device=caching_device,          dtype=variable_dtype,          validate_shape=validate_shape)    else:      v = variables.Variable(          initial_value=init_val,          name=name,          trainable=trainable,          collections=collections,          caching_device=caching_device,          dtype=variable_dtype,          validate_shape=validate_shape)    self._vars[name] = v    logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,                 format(shape), initializer)    # Run the regularizer if requested and save the resulting loss.    
if regularizer:      with ops.colocate_with(v.op):        with ops.name_scope(name + "/Regularizer/"):          loss = regularizer(v)        if loss is not None:          logging.vlog(1, "Applied regularizer to %s and added the result %s "                       "to REGULARIZATION_LOSSES.", v.name, loss.name)          ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)    return v  # Initialize variable when no initializer provided  def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):    """Provide a default initializer and a corresponding value.    Args:      name: see get_variable.      shape: see get_variable.      dtype: see get_variable.    Returns:      initializer and initializing_from_value. See get_variable above.    Raises:      ValueError: When giving unsupported dtype.    """    # If dtype is DT_FLOAT, provide a uniform unit scaling initializer    if dtype.is_floating:      initializer = init_ops.glorot_uniform_initializer()      initializing_from_value = False    # If dtype is DT_INT/DT_UINT, provide a default value `zero`    # If dtype is DT_BOOL, provide a default value `FALSE`    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:      initializer = init_ops.zeros_initializer()(          shape=shape, dtype=dtype.base_dtype)      initializing_from_value = True    # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?    else:      raise ValueError("An initializer for variable %s of %s is required"                       % (name, dtype.base_dtype))    return initializer, initializing_from_value# To stop regularization, use this regularizerdef no_regularizer(_):  """Use this function to prevent regularization of variables."""  return Noneclass VariableScope(object):  """Variable scope object to carry defaults to provide to `get_variable`.  Many of the arguments we need for `get_variable` in a variable store are most  easily handled with a context. This object is used for the defaults.  Attributes:    name: name of the current scope, used as prefix in get_variable.    initializer: default initializer passed to get_variable.    regularizer: default regularizer passed to get_variable.    reuse: Boolean or None, setting the reuse in get_variable.    caching_device: string, callable, or None: the caching device passed to      get_variable.    partitioner: callable or `None`: the partitioner passed to `get_variable`.    custom_getter: default custom getter passed to get_variable.    name_scope: The name passed to `tf.name_scope`.    dtype: default type passed to get_variable (defaults to DT_FLOAT).    use_resource: if False, create a normal Variable; if True create an      experimental ResourceVariable with well-defined semantics. Defaults      to False (will later change to True).  
"""  def __init__(self,               reuse,               name="",               initializer=None,               regularizer=None,               caching_device=None,               partitioner=None,               custom_getter=None,               name_scope="",               dtype=dtypes.float32,               use_resource=None):    """Creates a new VariableScope with the given properties."""    self._name = name    self._initializer = initializer    self._regularizer = regularizer    self._reuse = reuse    self._caching_device = caching_device    self._partitioner = partitioner    self._custom_getter = custom_getter    self._name_scope = name_scope    self._dtype = dtype    self._use_resource = use_resource  @property  def name(self):    return self._name  @property  def original_name_scope(self):    return self._name_scope  @property  def reuse(self):    return self._reuse  @property  def initializer(self):    return self._initializer  @property  def dtype(self):    return self._dtype  @property  def use_resource(self):    return self._use_resource  @property  def regularizer(self):    return self._regularizer  @property  def caching_device(self):    return self._caching_device  @property  def partitioner(self):    return self._partitioner  @property  def custom_getter(self):    return self._custom_getter  def reuse_variables(self):    """Reuse variables in this scope."""    self._reuse = True  def set_initializer(self, initializer):    """Set initializer for this scope."""    self._initializer = initializer  def set_dtype(self, dtype):    """Set data type for this scope."""    self._dtype = dtype  def set_use_resource(self, use_resource):    """Sets whether to use ResourceVariables for this scope."""    self._use_resource = use_resource  def set_regularizer(self, regularizer):    """Set regularizer for this scope."""    self._regularizer = regularizer  def set_caching_device(self, caching_device):    """Set caching_device for this scope."""    self._caching_device = caching_device  def set_partitioner(self, partitioner):    """Set partitioner for this scope."""    self._partitioner = partitioner  def set_custom_getter(self, custom_getter):    """Set custom getter for this scope."""    self._custom_getter = custom_getter  def get_collection(self, name):    """Get this scope's variables."""    scope = self._name + "/" if self._name else ""    return ops.get_collection(name, scope)  def trainable_variables(self):    """Get this scope's trainable variables."""    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)  def global_variables(self):    """Get this scope's global variables."""    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)  def get_variable(self,                   var_store,                   name,                   shape=None,                   dtype=None,                   initializer=None,                   regularizer=None,                   reuse=None,                   trainable=True,                   collections=None,                   caching_device=None,                   partitioner=None,                   validate_shape=True,                   use_resource=None,                   custom_getter=None,):    """Gets an existing variable with this name or create a new one."""    if regularizer is None:      regularizer = self._regularizer    if caching_device is None:      caching_device = self._caching_device    if partitioner is None:      partitioner = self._partitioner    if custom_getter is None:      custom_getter = self._custom_getter    if 
reuse is None:      reuse = self._reuse    full_name = self.name + "/" + name if self.name else name    # Variable names only depend on variable_scope (full_name here),    # not name_scope, so we reset it below for the time of variable creation.    with ops.name_scope(None):      # Check that `initializer` dtype and `dtype` are consistent before      # replacing them with defaults.      if (dtype is not None and initializer is not None and          not callable(initializer)):        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype        if init_dtype != dtype:          raise ValueError("Initializer type '%s' and explicit dtype '%s' "                           "don't match." % (init_dtype, dtype))      if initializer is None:        initializer = self._initializer      if dtype is None:        dtype = self._dtype      if use_resource is None:        use_resource = self._use_resource      return var_store.get_variable(          full_name, shape=shape, dtype=dtype, initializer=initializer,          regularizer=regularizer, reuse=reuse, trainable=trainable,          collections=collections, caching_device=caching_device,          partitioner=partitioner, validate_shape=validate_shape,          use_resource=use_resource, custom_getter=custom_getter)  def _get_partitioned_variable(self,                                var_store,                                name,                                shape=None,                                dtype=None,                                initializer=None,                                regularizer=None,                                trainable=True,                                collections=None,                                caching_device=None,                                partitioner=None,                                validate_shape=True,                                use_resource=None):    """Gets an existing variable with this name or create a new one."""    if initializer is None:      initializer = self._initializer    if regularizer is None:      regularizer = self._regularizer    if caching_device is None:      caching_device = self._caching_device    if partitioner is None:      partitioner = self._partitioner    if dtype is None:      dtype = self._dtype    if use_resource is None:      use_resource = self._use_resource    if self._custom_getter is not None:      raise ValueError(          "Private access to _get_partitioned_variable is not allowed when "          "a custom getter is set.  Current custom getter: %s.  "          "It is likely that you're using create_partitioned_variables.  "          "If so, consider instead using get_variable with a non-empty "          "partitioner parameter instead." % self._custom_getter)    if partitioner is None:      raise ValueError("No partitioner was specified")    # This allows the variable scope name to be used as the variable name if    # this function is invoked with an empty name arg, for backward    # compatibility with create_partitioned_variables().    full_name_list = []    if self.name:      full_name_list.append(self.name)    if name:      full_name_list.append(name)    full_name = "/".join(full_name_list)    # Variable names only depend on variable_scope (full_name here),    # not name_scope, so we reset it below for the time of variable creation.    
with ops.name_scope(None):      # pylint: disable=protected-access      return var_store._get_partitioned_variable(          full_name, shape=shape, dtype=dtype, initializer=initializer,          regularizer=regularizer, reuse=self.reuse, trainable=trainable,          collections=collections, caching_device=caching_device,          partitioner=partitioner, validate_shape=validate_shape,          use_resource=use_resource)      # pylint: enable=protected-access_VARSTORE_KEY = ("__variable_store",)_VARSCOPE_KEY = ("__varscope",)def get_variable_scope():  """Returns the current variable scope."""  scope = ops.get_collection(_VARSCOPE_KEY)  if scope:  # This collection has at most 1 element, the default scope at [0].    return scope[0]  scope = VariableScope(False)  ops.add_to_collection(_VARSCOPE_KEY, scope)  return scopedef _get_default_variable_store():  store = ops.get_collection(_VARSTORE_KEY)  if store:    return store[0]  store = _VariableStore()  ops.add_to_collection(_VARSTORE_KEY, store)  return storedef get_variable(name,                 shape=None,                 dtype=None,                 initializer=None,                 regularizer=None,                 trainable=True,                 collections=None,                 caching_device=None,                 partitioner=None,                 validate_shape=True,                 use_resource=None,                 custom_getter=None):  return get_variable_scope().get_variable(      _get_default_variable_store(), name, shape=shape, dtype=dtype,      initializer=initializer, regularizer=regularizer, trainable=trainable,      collections=collections, caching_device=caching_device,      partitioner=partitioner, validate_shape=validate_shape,      use_resource=use_resource, custom_getter=custom_getter)get_variable_or_local_docstring = (    """%s%sThis function prefixes the name with the current variable scopeand performs reuse checks. See the@{$variable_scope$Variable Scope How To}for an extensive description of how reusing works. Here is a basic example:```pythonwith tf.variable_scope("foo"):    v = tf.get_variable("v", [1])  # v.name == "foo/v:0"    w = tf.get_variable("w", [1])  # w.name == "foo/w:0"with tf.variable_scope("foo", reuse=True):    v1 = tf.get_variable("v")  # The same as v above.```If initializer is `None` (the default), the default initializer passed inthe variable scope will be used. If that one is `None` too, a`glorot_uniform_initializer` will be used. The initializer can also bea Tensor, in which case the variable is initialized to this value and shape.Similarly, if the regularizer is `None` (the default), the default regularizerpassed in the variable scope will be used (if that is `None` too,then by default no regularization is performed).If a partitioner is provided, a `PartitionedVariable` is returned.Accessing this object as a `Tensor` returns the shards concatenated alongthe partition axis.Some useful partitioners are available.  See, e.g.,`variable_axis_size_partitioner` and `min_max_variable_partitioner`.Args:  name: The name of the new or existing variable.  shape: Shape of the new or existing variable.  dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).  initializer: Initializer for the variable if one is created.  regularizer: A (Tensor -> Tensor or None) function; the result of    applying it on a newly created variable will be added to the collection    @{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.  
%scollections: List of graph collections keys to add the Variable to.    Defaults to `[%s]` (see `tf.Variable`).  caching_device: Optional device string or function describing where the    Variable should be cached for reading.  Defaults to the Variable's    device.  If not `None`, caches on another device.  Typical use is to    cache on the device where the Ops using the Variable reside, to    deduplicate copying through `Switch` and other conditional statements.  partitioner: Optional callable that accepts a fully defined `TensorShape`    and `dtype` of the Variable to be created, and returns a list of    partitions for each axis (currently only one axis can be partitioned).  validate_shape: If False, allows the variable to be initialized with a      value of unknown shape. If True, the default, the shape of initial_value      must be known.  use_resource: If False, creates a regular Variable. If true, creates an    experimental ResourceVariable instead with well-defined semantics.    Defaults to False (will later change to True).  custom_getter: Callable that takes as a first argument the true getter, and    allows overwriting the internal get_variable method.    The signature of `custom_getter` should match that of this method,    but the most future-proof version will allow for changes:    `def custom_getter(getter, *args, **kwargs)`.  Direct access to    all `get_variable` parameters is also allowed:    `def custom_getter(getter, name, *args, **kwargs)`.  A simple identity    custom getter that simply creates variables with modified names is:    ```python    def custom_getter(getter, name, *args, **kwargs):      return getter(name + '_suffix', *args, **kwargs)    ```Returns:  The created or existing `Variable` (or `PartitionedVariable`, if a  partitioner was used).Raises:  ValueError: when creating a new variable and shape is not declared,    when violating reuse during variable creation, or when `initializer` dtype    and `dtype` don't match. Reuse is set inside `variable_scope`.""")get_variable.__doc__ = get_variable_or_local_docstring % (    "Gets an existing variable with these parameters or create a new one.",    "",    "trainable: If `True` also add the variable to the graph collection\n"    "    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n  ",    "GraphKeys.GLOBAL_VARIABLES")@functools.wraps(get_variable)def get_local_variable(*args, **kwargs):  kwargs["trainable"] = False  if "collections" in kwargs:    kwargs["collections"] += [ops.GraphKeys.LOCAL_VARIABLES]  else:    kwargs["collections"] = [ops.GraphKeys.LOCAL_VARIABLES]  return get_variable(*args, **kwargs)get_local_variable.__doc__ = get_variable_or_local_docstring % (    "Gets an existing *local* variable or creates a new one.",    "Behavior is the same as in `get_variable`, except that variables are\n"    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"    "`False`.\n",    "",    "GraphKeys.LOCAL_VARIABLES")def _get_partitioned_variable(name,                              shape=None,                              dtype=None,                              initializer=None,                              regularizer=None,                              trainable=True,                              collections=None,                              caching_device=None,                              partitioner=None,                              validate_shape=True,                              use_resource=None):  """Gets or creates a sharded variable list with these parameters.  
The `partitioner` must be a callable that accepts a fully defined  `TensorShape` and returns a sequence of integers (the `partitions`).  These integers describe how to partition the given sharded `Variable`  along the given dimension.  That is, `partitions[1] = 3` means split  the `Variable` into 3 shards along dimension 1.  Currently, sharding along  only one axis is supported.  If the list of variables with the given name (prefix) is already stored,  we return the stored variables. Otherwise, we create a new one.  Set `reuse` to `True` when you only want to reuse existing Variables.  Set `reuse` to `False` when you only want to create new Variables.  If `reuse` is `None` (the default), both new and existing variables are  returned.  If initializer is `None` (the default), the default initializer passed in  the constructor is used. If that one is `None` too, we use a new  `glorot_uniform_initializer`. If initializer is a Tensor, we use  it as a value and derive the shape from the initializer.  If the initializer is a callable, then it will be called for each  shard.  Otherwise the initializer should match the shape of the entire  sharded Variable, and it will be sliced accordingly for each shard.  Some useful partitioners are available.  See, e.g.,  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.  Args:    name: The name of the new or existing variable.    shape: Shape of the new or existing variable.    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).    initializer: Initializer for the variable if one is created.    regularizer: A (Tensor -> Tensor or None) function; the result of      applying it on a newly created variable will be added to the collection      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.    trainable: If `True` also add the variable to the graph collection      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).    collections: List of graph collections keys to add the Variable to.      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).    caching_device: Optional device string or function describing where the      Variable should be cached for reading.  Defaults to the Variable's      device.  If not `None`, caches on another device.  Typical use is to      cache on the device where the Ops using the Variable reside, to      deduplicate copying through `Switch` and other conditional statements.    partitioner: Optional callable that accepts a fully defined `TensorShape`      and `dtype` of the Variable to be created, and returns a list of      partitions for each axis (currently only one axis can be partitioned).    validate_shape: If False, allows the variable to be initialized with a        value of unknown shape. If True, the default, the shape of initial_value        must be known.    use_resource: If False, creates a regular Variable. If True, creates an      experimental ResourceVariable instead which has well-defined semantics.      Defaults to False (will later change to True).  Returns:    A tuple `(shards, partitions)` where `shards` is the list of `Variable`    shards and `partitions` is the output of the partitioner on the input    shape.  Raises:    ValueError: when creating a new variable and shape is not declared,      or when violating reuse during variable creation. Reuse is set inside      `variable_scope`.  
"""  # pylint: disable=protected-access  scope = get_variable_scope()  if scope.custom_getter is not None:    raise ValueError(        "Private access to _get_partitioned_variable is not allowed when "        "a custom getter is set.  Current custom getter: %s.  "        "It is likely that you're using create_partitioned_variables.  "        "If so, consider instead using get_variable with a non-empty "        "partitioner parameter instead." % scope.custom_getter)  return scope._get_partitioned_variable(      _get_default_variable_store(), name, shape=shape, dtype=dtype,      initializer=initializer, regularizer=regularizer, trainable=trainable,      collections=collections, caching_device=caching_device,      partitioner=partitioner, validate_shape=validate_shape,      use_resource=use_resource)  # pylint: enable=protected-access@tf_contextlib.contextmanagerdef _pure_variable_scope(name_or_scope,                         reuse=None,                         initializer=None,                         regularizer=None,                         caching_device=None,                         partitioner=None,                         custom_getter=None,                         old_name_scope=None,                         dtype=dtypes.float32,                         use_resource=None):  """Creates a context for the variable_scope, see `variable_scope` for docs.  Note: this does not create a name scope.  Args:    name_or_scope: `string` or `VariableScope`: the scope to open.    reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as      well as all sub-scopes; if `None`, we just inherit the parent scope reuse.    initializer: default initializer for variables within this scope.    regularizer: default regularizer for variables within this scope.    caching_device: default caching device for variables within this scope.    partitioner: default partitioner for variables within this scope.    custom_getter: default custom getter for variables within this scope.    old_name_scope: the original name scope when re-entering a variable scope.    dtype: type of the variables within this scope (defaults to `DT_FLOAT`).    use_resource: If False, variables in this scope will be regular Variables.      If True, experimental ResourceVariables will be creates instead, with      well-defined semantics. Defaults to False (will later change to True).  Yields:    A scope that can be captured and reused.  Raises:    ValueError: when trying to reuse within a create scope, or create within      a reuse scope, or if reuse is not `None` or `True`.    TypeError: when the types of some arguments are not appropriate.  """  get_variable_scope()  # Ensure that a default exists, then get a pointer.  # Get the reference to the collection as we want to modify it in place.  default_varscope = ops.get_collection_ref(_VARSCOPE_KEY)  old = default_varscope[0]  var_store = _get_default_variable_store()  if isinstance(name_or_scope, VariableScope):    new_name = name_or_scope.name  else:    new_name = old.name + "/" + name_or_scope if old.name else name_or_scope  try:    var_store.open_variable_scope(new_name)    if isinstance(name_or_scope, VariableScope):      old_subscopes = copy.copy(var_store.variable_scopes_count)      name_scope = name_or_scope._name_scope  # pylint: disable=protected-access      # Handler for the case when we jump to a shared scope.      
#   We create a new VariableScope (default_varscope[0]) that contains      #   a copy of the provided shared scope, possibly with changed reuse      #   and initializer, if the user requested this.      default_varscope[0] = VariableScope(          name_or_scope.reuse if reuse is None else reuse,          name=new_name,          initializer=name_or_scope.initializer,          regularizer=name_or_scope.regularizer,          caching_device=name_or_scope.caching_device,          partitioner=name_or_scope.partitioner,          dtype=name_or_scope.dtype,          custom_getter=name_or_scope.custom_getter,          name_scope=name_scope,          use_resource=name_or_scope.use_resource)      if initializer is not None:        default_varscope[0].set_initializer(initializer)      if regularizer is not None:        default_varscope[0].set_regularizer(regularizer)      if caching_device is not None:        default_varscope[0].set_caching_device(caching_device)      if partitioner is not None:        default_varscope[0].set_partitioner(partitioner)      if custom_getter is not None:        default_varscope[0].set_custom_getter(            _maybe_wrap_custom_getter(                custom_getter, name_or_scope.custom_getter))      if dtype is not None:        default_varscope[0].set_dtype(dtype)      if use_resource is not None:        default_varscope[0].set_use_resource(use_resource)      yield default_varscope[0]    else:      # Handler for the case when we just prolong current variable scope.      #   VariableScope with name extended by the provided one, and inherited      #   reuse and initializer (except if the user provided values to set).      reuse = reuse or old.reuse  # Re-using is inherited by sub-scopes.      default_varscope[0] = VariableScope(          reuse,          name=new_name,          initializer=old.initializer,          regularizer=old.regularizer,          caching_device=old.caching_device,          partitioner=old.partitioner,          dtype=old.dtype,          use_resource=old.use_resource,          custom_getter=old.custom_getter,          name_scope=old_name_scope or name_or_scope)      if initializer is not None:        default_varscope[0].set_initializer(initializer)      if regularizer is not None:        default_varscope[0].set_regularizer(regularizer)      if caching_device is not None:        default_varscope[0].set_caching_device(caching_device)      if partitioner is not None:        default_varscope[0].set_partitioner(partitioner)      if custom_getter is not None:        default_varscope[0].set_custom_getter(            _maybe_wrap_custom_getter(custom_getter, old.custom_getter))      if dtype is not None:        default_varscope[0].set_dtype(dtype)      if use_resource is not None:        default_varscope[0].set_use_resource(use_resource)      yield default_varscope[0]  finally:    var_store.close_variable_subscopes(new_name)    # If jumping out from a non-prolonged scope, restore counts.    
if isinstance(name_or_scope, VariableScope):      var_store.variable_scopes_count = old_subscopes    default_varscope[0] = olddef _maybe_wrap_custom_getter(custom_getter, old_getter):  """Wrap a call to a custom_getter to use the old_getter internally."""  if old_getter is None:    return custom_getter  # The new custom_getter should call the old one  def wrapped_custom_getter(getter, *args, **kwargs):    # Call:    #  custom_getter(    #    lambda: old_getter(true_getter, ...), *args, **kwargs)    # which means custom_getter will call old_getter, which    # will call the true_getter, perform any intermediate    # processing, and return the results to the current    # getter, which will also perform additional processing.    return custom_getter(        functools.partial(old_getter, getter),        *args, **kwargs)  return wrapped_custom_getterdef _get_unique_variable_scope(prefix):  """Get a name with the given prefix unique in the current variable scope."""  var_store = _get_default_variable_store()  current_scope = get_variable_scope()  name = current_scope.name + "/" + prefix if current_scope.name else prefix  if var_store.variable_scope_count(name) == 0:    return prefix  idx = 1  while var_store.variable_scope_count(name + ("_%d" % idx)) > 0:    idx += 1  return prefix + ("_%d" % idx)# pylint: disable=g-doc-return-or-yield@tf_contextlib.contextmanagerdef variable_scope(name_or_scope,                   default_name=None,                   values=None,                   initializer=None,                   regularizer=None,                   caching_device=None,                   partitioner=None,                   custom_getter=None,                   reuse=None,                   dtype=None,                   use_resource=None):  """Returns a context manager for defining ops that creates variables (layers).  This context manager validates that the (optional) `values` are from  the same graph, ensures that graph is the default graph, and pushes a  name scope and a variable scope.  If `name_or_scope` is not None, it is used as is. If `scope` is None, then  `default_name` is used.  In that case, if the same name has been previously  used in the same scope, it will made unique be appending `_N` to it.  Variable scope allows to create new variables and to share already created  ones while providing checks to not create or share by accident. For details,  see the @{$variable_scope$Variable Scope How To},  here we present only a few basic examples.  Simple example of how to create a new variable:  ```python  with tf.variable_scope("foo"):      with tf.variable_scope("bar"):          v = tf.get_variable("v", [1])          assert v.name == "foo/bar/v:0"  ```  Basic example of sharing a variable:  ```python  with tf.variable_scope("foo"):      v = tf.get_variable("v", [1])  with tf.variable_scope("foo", reuse=True):      v1 = tf.get_variable("v", [1])  assert v1 == v  ```  Sharing a variable by capturing a scope and setting reuse:  ```python  with tf.variable_scope("foo") as scope:      v = tf.get_variable("v", [1])      scope.reuse_variables()      v1 = tf.get_variable("v", [1])  assert v1 == v  ```  To prevent accidental sharing of variables, we raise an exception when  getting an existing variable in a non-reusing scope.  ```python  with tf.variable_scope("foo"):      v = tf.get_variable("v", [1])      v1 = tf.get_variable("v", [1])      #  Raises ValueError("... v already exists ...").  ```  Similarly, we raise an exception when trying to get a variable that  does not exist in reuse mode.  
```python  with tf.variable_scope("foo", reuse=True):      v = tf.get_variable("v", [1])      #  Raises ValueError("... v does not exists ...").  ```  Note that the `reuse` flag is inherited: if we open a reusing scope,  then all its sub-scopes become reusing as well.  A note about name scoping: Setting `reuse` does not impact the naming of other  ops such as mult. See related discussion on [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)  Note that up to and including version 1.0, it was allowed (though  explicitly discouraged) to pass False to the reuse argument, yielding  undocumented behaviour slightly different from None. Starting at 1.1.0  passing None and False as reuse has exactly the same effect.  Args:    name_or_scope: `string` or `VariableScope`: the scope to open.    default_name: The default name to use if the `name_or_scope` argument is      `None`, this name will be uniquified. If name_or_scope is provided it      won't be used and therefore it is not required and can be None.    values: The list of `Tensor` arguments that are passed to the op function.    initializer: default initializer for variables within this scope.    regularizer: default regularizer for variables within this scope.    caching_device: default caching device for variables within this scope.    partitioner: default partitioner for variables within this scope.    custom_getter: default custom getter for variables within this scope.    reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as      well as all sub-scopes; if `None`, we just inherit the parent scope reuse.    dtype: type of variables created in this scope (defaults to the type      in the passed scope, or inherited from parent scope).    use_resource: If False, all variables will be regular Variables. If True,      experimental ResourceVariables with well-defined semantics will be used      instead. Defaults to False (will later change to True).  Returns:    A scope that can be to captured and reused.  Raises:    ValueError: when trying to reuse within a create scope, or create within      a reuse scope.    TypeError: when the types of some arguments are not appropriate.  """  if default_name is None and name_or_scope is None:    raise TypeError("If default_name is None then name_or_scope is required")  if not (reuse is True or reuse is False or reuse is None):    raise ValueError("The reuse parameter must be True or False or None.")  if reuse is False:  # We don't allow non-inheriting scopes, False = None here.    
reuse = None  if values is None:    values = []  g = ops._get_graph_from_inputs(values)  # pylint: disable=protected-access  with g.as_default():    if name_or_scope is not None:      if not isinstance(name_or_scope, (VariableScope,) + six.string_types):        raise TypeError("VariableScope: name_or_scope must be a string or "                        "VariableScope.")      if isinstance(name_or_scope, six.string_types):        name_scope = name_or_scope      else:        name_scope = name_or_scope.name.split("/")[-1]      if name_scope:        with ops.name_scope(name_scope) as cur_name_scope:          if isinstance(name_or_scope, six.string_types):            old_name_scope = cur_name_scope          else:            old_name_scope = name_or_scope.original_name_scope          with _pure_variable_scope(              name_or_scope,              reuse=reuse,              initializer=initializer,              regularizer=regularizer,              caching_device=caching_device,              partitioner=partitioner,              custom_getter=custom_getter,              old_name_scope=old_name_scope,              dtype=dtype,              use_resource=use_resource) as vs:            yield vs      else:        # This can only happen if someone is entering the root variable scope.        with _pure_variable_scope(            name_or_scope,            reuse=reuse,            initializer=initializer,            regularizer=regularizer,            caching_device=caching_device,            partitioner=partitioner,            custom_getter=custom_getter,            dtype=dtype,            use_resource=use_resource) as vs:          yield vs    else:  # Here name_or_scope is None. Using default name, but made unique.      if reuse:        raise ValueError("reuse=True cannot be used without a name_or_scope")      with ops.name_scope(default_name) as scope:        unique_default_name = _get_unique_variable_scope(default_name)        with _pure_variable_scope(            unique_default_name,            initializer=initializer,            regularizer=regularizer,            caching_device=caching_device,            partitioner=partitioner,            custom_getter=custom_getter,            old_name_scope=scope,            dtype=dtype,            use_resource=use_resource) as vs:          yield vs# pylint: disable=g-doc-return-or-yield@tf_contextlib.contextmanagerdef variable_op_scope(values,                      name_or_scope,                      default_name=None,                      initializer=None,                      regularizer=None,                      caching_device=None,                      partitioner=None,                      custom_getter=None,                      reuse=None,                      dtype=None,                      use_resource=None):  """Deprecated: context manager for defining an op that creates variables."""  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"               " use tf.variable_scope(name, default_name, values)")  with variable_scope(name_or_scope,                      default_name=default_name,                      values=values,                      initializer=initializer,                      regularizer=regularizer,                      caching_device=caching_device,                      partitioner=partitioner,                      custom_getter=custom_getter,                      reuse=reuse,                      dtype=dtype,                      use_resource=use_resource) as scope:    yield scopedef 
_compute_slice_dim_and_shape(full_shape, slicing):  """Computes which dimension is being sliced and the typical slice shape."""  slice_shape = [0] * len(full_shape)  slice_dim = None  for dim, num_slices in enumerate(slicing):    dim_size = full_shape[dim]    if num_slices <= 0 or dim_size < num_slices:      raise ValueError("Cannot create %d slices for size %d. shape: %s, "                       "slicing: %s" %                       (num_slices, full_shape[dim], full_shape, slicing))    if num_slices == 1:      # Not slicing in this dimension.      slice_shape[dim] = dim_size    elif slice_dim is not None:      # We only support slicing along one of the dimensions.      raise ValueError("Can only slice a variable along one dimension: "                       "shape: %s, slicing: %s" % (full_shape, slicing))    else:      # Note: We will add any extras onto the last slice, later.      slice_dim = dim      slice_shape[dim] = dim_size // num_slices  # Degenerate case: If "slicing" was all ones, pretend we are slicing along  # the first dimension.  if slice_dim is None:    slice_dim = 0  return slice_dim, slice_shapedef variable(initial_value=None,             trainable=True,             collections=None,             validate_shape=True,             caching_device=None,             name=None,             dtype=None):  if get_variable_scope().use_resource:    return resource_variable_ops.ResourceVariable(        initial_value=initial_value, trainable=trainable,        collections=collections, validate_shape=validate_shape,        caching_device=caching_device, name=name, dtype=dtype)  else:    return variables.Variable(        initial_value=initial_value, trainable=trainable,        collections=collections, validate_shape=validate_shape,        caching_device=caching_device, name=name, dtype=dtype)
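
A few supplementary sketches based on the appendix source above (all assume a TensorFlow 1.x graph-mode environment; scope and variable names such as "demo" and "w" are made up for illustration). First, `get_variable` routes every request through an optional `custom_getter` before the true getter runs; the docstring illustrates this with an identity getter that only renames variables, expanded here into a runnable form:

# -*- coding: utf-8 -*-
"""
custom_getter sketch: rename every variable created inside one scope.
Assumes TensorFlow 1.x; the names "demo", "w" and the "_suffix" tag are illustrative.
"""
import tensorflow as tf

def suffix_getter(getter, name, *args, **kwargs):
    # Delegate to the true getter, but append a suffix to the requested name.
    return getter(name + "_suffix", *args, **kwargs)

with tf.variable_scope("demo", custom_getter=suffix_getter):
    w = tf.get_variable("w", [1])

print(w.name)  # expected: demo/w_suffix:0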
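
`_get_partitioned_variable` splits one logical variable into shards along a single axis, following the partition list returned by the partitioner. A minimal sketch, assuming TF 1.x and `tf.fixed_size_partitioner` (the axis-size and min-max partitioners mentioned in the docstring are used the same way); the shapes here are illustrative:

import tensorflow as tf

with tf.variable_scope("part", partitioner=tf.fixed_size_partitioner(2, axis=0)):
    # The variable store creates part/w/part_0 and part/w/part_1 behind the scenes.
    w = tf.get_variable("w", shape=[4, 3])

print(type(w).__name__)  # expected: PartitionedVariable
for shard in tf.global_variables():
    print(shard.name, shard.get_shape())  # expected: part/w/part_0:0 (2, 3) and part/w/part_1:0 (2, 3)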
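
In `_get_single_variable`, a `regularizer` passed through the scope is applied to each newly created variable and the result is collected into `GraphKeys.REGULARIZATION_LOSSES`. A sketch with a hand-written L2 term (the 0.01 scale is arbitrary), assuming TF 1.x:

import tensorflow as tf

def l2(weights):
    # Whatever this returns is added to the REGULARIZATION_LOSSES collection.
    return 0.01 * tf.nn.l2_loss(weights)

with tf.variable_scope("reg", regularizer=l2):
    v = tf.get_variable("v", shape=[3], initializer=tf.ones_initializer())

reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.add_n(reg_losses)))  # expected: 0.015 (= 0.01 * 3 / 2)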
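
`_get_default_initializer` shows what happens when no initializer is given: floating-point variables get `glorot_uniform_initializer`, integer/boolean variables get zeros, and anything else (for example `tf.string`) raises a `ValueError`. A quick sketch, again assuming TF 1.x:

import tensorflow as tf

f = tf.get_variable("f", shape=[2, 2])               # float32 -> glorot_uniform by default
i = tf.get_variable("i", shape=[2], dtype=tf.int32)  # int32   -> zeros by default
try:
    s = tf.get_variable("s", shape=[1], dtype=tf.string)
except ValueError as e:
    print(e)  # roughly: "An initializer for variable s of <dtype: 'string'> is required"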
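
`variable_scope` also accepts `default_name`: when `name_or_scope` is None, `_get_unique_variable_scope` appends `_1`, `_2`, ... so that repeated calls do not collide. A sketch of the typical layer-function pattern, assuming TF 1.x (`dense_layer` is a made-up helper):

import tensorflow as tf

def dense_layer(x):
    # name_or_scope is None, so default_name is used and uniquified per call.
    with tf.variable_scope(None, default_name="layer"):
        w = tf.get_variable("w", [1], initializer=tf.constant_initializer(2.0))
        return x * w

x = tf.constant(1.0)
y1 = dense_layer(x)
y2 = dense_layer(x)
for v in tf.global_variables():
    print(v.name)  # expected: layer/w:0, then layer_1/w:0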
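
Finally, `get_local_variable` is a thin wrapper over `get_variable` that forces `trainable=False` and adds the variable to `GraphKeys.LOCAL_VARIABLES`, which is handy for counters and other bookkeeping state. A minimal sketch, assuming the TF 1.x export `tf.get_local_variable`:

import tensorflow as tf

counter = tf.get_local_variable("counter", shape=[], initializer=tf.zeros_initializer())

print([v.name for v in tf.local_variables()])      # expected: ['counter:0']
print([v.name for v in tf.trainable_variables()])  # expected: []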