`@tf.function` compiles a Python function into a TensorFlow graph:

```python
import tensorflow as tf
from absl.flags import FLAGS

@tf.function
def trans...  # truncated in the source
```
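Since the function above is cut off, here is a minimal self-contained sketch of `@tf.function` on its own; the function `double` and its inputs are made up for illustration:

```python
import tensorflow as tf

# The Python function is traced into a graph on first call;
# subsequent calls with the same input signature reuse the graph.
@tf.function
def double(x):
    return x * 2

print(double(tf.constant(3)))         # tf.Tensor(6, shape=(), dtype=int32)
print(double(tf.constant([1., 2.])))  # new dtype/shape => retraced once
```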
`tf.lookup.StaticHashTable` is essentially TensorFlow's built-in dictionary; it is used several times in the yolov3 tf code:

```python
def load_tfrecord_dataset(file_pattern, class_file, size=416):
    LINE_NUMBER = -1  # TODO: use tf.lookup.TextFileIndex.LINE_NUMBER
    class_table = tf.lookup.StaticHashTable(tf.lookup.TextFileInitializer(
        class_file, tf.string, 0, tf.int64, LINE_NUMBER, delimiter="\n"), -1)
    ...
```
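Because that snippet depends on an external class-names file, here is a self-contained sketch of the same idea using `tf.lookup.KeyValueTensorInitializer`; the keys and values are made up for illustration:

```python
import tensorflow as tf

# A static (immutable after init) hash table: class name -> integer id,
# with -1 as the default value for unknown keys.
keys = tf.constant(["person", "car", "dog"])
values = tf.constant([0, 1, 2], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)

print(table.lookup(tf.constant(["car", "cat"])).numpy())  # [ 1 -1]
```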
```python
import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
for element in dataset:
    print(element)
```
```python
import tensorflow as tf

arr = tf.constant([[1, 2, 3], [4, 5, 6]])
print(arr)
print('-' * 30)
for k in range(arr.shape[0]):  # loop body completed from the truncated source
    print(arr[k])
```
`tf.TensorArray` is TensorFlow's dynamic array; with `clear_after_read=False` its elements can be read at any time:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False)
ta = ta.write(0, 10)
ta = ta.write(1, 20)
ta = ta.write(2, 30)
print(ta.read(0))  # tf.Tensor(10.0, shape=(), dtype=float32)
```
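A short sketch of what `clear_after_read=False` buys you: read-many semantics, so the same index can be read repeatedly, and the whole array can still be gathered with `stack()`:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False)
ta = ta.write(0, 10)

print(ta.read(0).numpy())  # 10.0
print(ta.read(0).numpy())  # 10.0 -- second read still works
print(ta.stack().numpy())  # [10.] -- all elements as one tensor
```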
```python
import tensorflow as tf

tf.minimum([5, 2, 3], [2, 3, 4])
# <tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>

tf.minimum([[3, 6, 7], [5, 2, 3]], [2, 3, 4])  # second argument broadcasts across rows
# <tf.Tensor: shape=(2, 3), dtype=int32, numpy=array([[2, 3, 4], [2, 2, 3]], dtype=int32)>
```
Assigning values at the given indices; here the indices are one-dimensional (index_depth == 1):

```python
import tensorflow as tf

tensor = [0, 0, 0, 0, 0, 0, 0, 0]  # tf.rank(tensor) == 1
indices = [[1], [3], [4], [7]]     # num_updates == 4, index_depth == 1
updates = [9, 10, 11, 12]          # num_updates == 4 (completed from the truncated source)
print(tf.tensor_scatter_nd_update(tensor, indices, updates))
# tf.Tensor([ 0  9  0 10 11  0  0 12], shape=(8,), dtype=int32)
```
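For contrast, a sketch of the two-dimensional case, where each index is a full (row, col) coordinate; the values are made up for illustration:

```python
import tensorflow as tf

# 2-D case: index_depth == 2, so each index selects one scalar element.
tensor = [[1, 1], [1, 1], [1, 1]]  # tf.rank(tensor) == 2
indices = [[0, 1], [2, 0]]         # num_updates == 2, index_depth == 2
updates = [5, 10]                  # num_updates == 2
print(tf.tensor_scatter_nd_update(tensor, indices, updates))
# tf.Tensor(
# [[ 1  5]
#  [ 1  1]
#  [10  1]], shape=(3, 2), dtype=int32)
```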
```python
t = [0, 1, 2, 3, 4]
tf.roll(t, shift=2, axis=0)
# <tf.Tensor: shape=(5,), dtype=int32, numpy=array([3, 4, 0, 1, 2], dtype=int32)>
```

For a 2-D tensor the axis argument is easy to misread: axis=0 rolls along the first dimension, shifting whole rows, while axis=1 rolls within each row:

```python
t = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
t1 = tf.roll(t, shift=1, axis=0)  # completed from the truncated source
# <tf.Tensor: shape=(2, 5), dtype=int32, numpy=array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]], dtype=int32)>
```
```python
import tensorflow as tf

def make_variables(k, initializer):
    return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),
            tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))

v1, v2 = make_variables(3, tf.ones_initializer())  # completed from the truncated source
```
```python
tf.eye(2)
# <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
# array([[1., 0.],
#        [0., 1.]], dtype=float32)>
```
```python
tf.fill([2, 3], 9)
# <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
# array([[9, 9, 9],
#        [9, 9, 9]], dtype=int32)>
```
`tf.foldl` folds a function over the elements from left to right:

```python
import tensorflow as tf

elems = tf.constant([1, 2, 3, 4, 5, 6])  # assumed; the definition is truncated in the source
sum1 = tf.foldl(lambda a, x: a * x, elems)
print(sum1.numpy())  # 720
sum2 = tf.foldl(lambda a, x: a + x, elems)
print(sum2.numpy())  # 21
```
```python
import tensorflow as tf
import numpy as np

elems = np.array([1, 2, 3, 4, 5, 6])  # assumed; the definition is truncated in the source
tf.map_fn(lambda x: x * x, elems)
# <tf.Tensor: shape=(6,), dtype=int64, numpy=array([ 1,  4,  9, 16, 25, 36])>
```
Subclass `tf.Module` instead of `object`: any `tf.Variable` or `tf.Module` instance assigned as an attribute of the object is collected automatically through the `variables`, `trainable_variables`, and `submodules` properties:

```python
import tensorflow as tf

class Dense(tf.Module):
    # body completed following the tf.Module guide's Dense example
    def __init__(self, in_features, out_features, name=None):
        super().__init__(name=name)
        self.w = tf.Variable(tf.random.normal([in_features, out_features]), name='w')
        self.b = tf.Variable(tf.zeros([out_features]), name='b')

    def __call__(self, x):
        return tf.nn.relu(tf.matmul(x, self.w) + self.b)
```
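A quick sketch of that collection behavior, using the `Dense` module defined above:

```python
d = Dense(in_features=3, out_features=2)
print(len(d.variables))                            # 2: w and b were collected automatically
print(sorted(v.name for v in d.trainable_variables))  # ['b:0', 'w:0']
print(list(d.submodules))                          # [] -- no nested tf.Module here
```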
```python
tf.ones([3, 4], tf.int32)
# <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
# array([[1, 1, 1, 1],
#        [1, 1, 1, 1],
#        [1, 1, 1, 1]], dtype=int32)>
```
```python
import tensorflow as tf

# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t)  # 3
```

The rank of a tensor is not the same as the rank of a matrix. A tensor's rank is the number of indices required to uniquely select each of its elements, i.e. its number of dimensions. Rank is also known as "order", "degree", or "ndims".
`tf.reverse_sequence` reverses only a prefix of each row: for row i, the first seq_lengths[i] elements along the sequence axis are reversed and the rest are left unchanged.
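A minimal sketch; the input and seq_lengths are made up for illustration:

```python
import tensorflow as tf

# Row 0: reverse its first 3 elements; row 1: reverse its first 2.
x = tf.constant([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
print(tf.reverse_sequence(x, seq_lengths=[3, 2], seq_axis=1, batch_axis=0))
# tf.Tensor(
# [[3 2 1 4]
#  [6 5 7 8]], shape=(2, 4), dtype=int32)
```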
```python
import tensorflow as tf

t = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9])
tf.reshape(t, [3, 3])
# [[1, 2, 3],
#  [4, 5, 6],
#  [7, 8, 9]]

t = tf.constant([[[1, 1], [2, 2]], [[3, 3], [4, 4]]])
# tensor 't' has shape [2, 2, 2]
tf.reshape(t, [2, 4])  # completed from the truncated source
# [[1, 1, 2, 2],
#  [3, 3, 4, 4]]
```
An ESMM reference implementation: https://github.com/wziji/deep_ctr/tree/master/ESMM
`tf.argmax` is slightly counter-intuitive: axis=0 returns the index of the maximum of each column, while axis=1 returns the index of the maximum of each row, the same convention as numpy:

```python
import tensorflow as tf
import numpy as np

a = np.array([[2, 4, 5, 7], [9, 3, 6, 2]])
print('-' * 30 + 'divider' + '-' * 30)
print(a)
print('-' * 30 + 'divider' + '-' * 30)
a1 = tf.argmax(a, axis=0)
print('tf.argmax(a, axis=0):', a1.numpy())  # [1 0 1 0] (completed from the truncated source)
```
```python
import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.broadcast_to(x, [5, 3])
print(y)
# tf.Tensor(
# [[1 2 3]
#  [1 2 3]
#  [1 2 3]
#  [1 2 3]
#  [1 2 3]], shape=(5, 3), dtype=int32)
```
`tf.broadcast_dynamic_shape` computes the broadcast of two symbolic shapes. The broadcasting rules: two arrays are broadcast-compatible if, comparing their trailing dimensions (counted from the end), (1) the axis lengths match, or (2) one of them has length 1. Broadcasting then takes place over the missing and/or length-1 dimensions.

```python
import tensorflow as tf

shape_x = (6, 3)
shape_y = (5, 1, 3)
c = tf.broadcast_dynamic_shape(shape_x, shape_y)
print(c)  # tf.Tensor([5 6 3], shape=(3,), dtype=int32)
```
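The same two rules can be checked on static shapes with `tf.broadcast_static_shape`; the shapes below are chosen for illustration:

```python
import tensorflow as tf

# (6, 3) is padded to (1, 6, 3); each trailing dim matches or is 1.
print(tf.broadcast_static_shape(tf.TensorShape([6, 3]),
                                tf.TensorShape([5, 1, 3])))  # (5, 6, 3)

# Incompatible trailing dimensions (3 vs 4) raise an error.
try:
    tf.broadcast_static_shape(tf.TensorShape([6, 3]), tf.TensorShape([6, 4]))
except ValueError as e:
    print("not broadcastable:", e)
```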
The literal translation of TensorFlow is tensor plus flow. Why introduce a broadcasting mechanism for tensors at all? Because there is profit in it: broadcasting saves memory, which, to put it bluntly, saves money!
```python
import tensorflow as tf
import numpy as np

elems = np.array([1, 2, 3, 4, 5, 6])
sum = tf.scan(lambda a, x: a + x, elems)
# sum == [1, 3, 6, 10, 15, 21]
```
```python
import tensorflow as tf

image = tf.zeros([10, 10, 3])
print(image.shape.as_list())  # [10, 10, 3]
```
```python
import os
import tensorflow as tf
import cProfile

tf.executing_eagerly()  # True: TF 2.x executes eagerly by default

x = [[2.]]
m = tf.matmul(x, x)
print("hello, {}".format(m))
# hello, [[4.]]
```