Implementing the AlexNet Convolutional Neural Network in TensorFlow and Benchmarking Its Runtime
This post shares the full code for an AlexNet implementation in TensorFlow, together with a simple runtime benchmark, for your reference.
The construction of the AlexNet network was covered in an earlier post. This time the goal is not to train on real data, but to measure the average time each batch takes for the forward and backward passes. When designing a network, classification accuracy matters, but computational speed matters just as much: in tracking tasks especially, a network that is too deep can make real-time operation impossible.
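The measurement pattern used below is simple: run a number of warm-up iterations first (so one-time graph-setup and memory-allocation costs are excluded), then time each subsequent session.run call and report the mean and standard deviation. As a distilled sketch of that pattern, separated from the AlexNet graph (the function name benchmark and its defaults are illustrative, not from the original listing):

    import math
    import time

    def benchmark(session, target, num_batches=100, num_steps_burn_in=10):
        # Warm-up steps are executed but not counted, so one-time
        # initialization costs do not skew the statistics.
        total, total_sq = 0.0, 0.0
        for i in range(num_batches + num_steps_burn_in):
            start = time.time()
            session.run(target)
            duration = time.time() - start
            if i >= num_steps_burn_in:
                total += duration
                total_sq += duration * duration
        mean = total / num_batches
        # Variance via E[x^2] - E[x]^2; the std is its square root.
        std = math.sqrt(total_sq / num_batches - mean * mean)
        return mean, std

The full listing below applies exactly this scheme, once to the forward pass alone and once to the forward plus backward pass.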
from datetime import datetime
import math
import time

import tensorflow as tf

batch_size = 32
num_batches = 100


def print_activations(t):
    # Print the name and output shape of a layer's tensor.
    print(t.op.name, '', t.get_shape().as_list())


def inference(images):
    """Build the AlexNet convolutional layers; return the final pooling
    tensor and the list of trainable parameters."""
    parameters = []

    # conv1: 11x11 kernel, 64 output channels, stride 4.
    with tf.name_scope('conv1') as scope:
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        parameters += [kernel, biases]

    # Local response normalization followed by 3x3 max pooling.
    lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn1')
    pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
    print_activations(pool1)

    # conv2: 5x5 kernel, 192 output channels.
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
    print_activations(conv2)

    lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn2')
    pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
    print_activations(pool2)

    # conv3 - conv5: 3x3 kernels, no LRN or pooling in between.
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv3)

    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv4)

    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv5)

    pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
    print_activations(pool5)

    return pool5, parameters


def time_tensorflow_run(session, target, info_string):
    # Discard the first runs so one-time setup costs (graph optimization,
    # memory allocation) do not distort the timings.
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' % (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, num_batches, mn, sd))


def run_benchmark():
    with tf.Graph().as_default():
        # Random images stand in for real data; only speed is measured here.
        image_size = 224
        images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))
        pool5, parameters = inference(images)

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        # Time the forward pass alone.
        time_tensorflow_run(sess, pool5, "Forward")

        # Time forward plus backward: differentiate an L2 loss on pool5
        # with respect to all trainable parameters.
        objective = tf.nn.l2_loss(pool5)
        grad = tf.gradients(objective, parameters)
        time_tensorflow_run(sess, grad, "Forward-backward")


run_benchmark()
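Running the script prints each layer's output shape, a per-step duration every ten steps, and finally the mean and standard deviation in seconds per batch, once for the forward pass and once for the combined forward-backward pass. Note that the listing uses the TensorFlow 1.x graph API (tf.Session, tf.global_variables_initializer, tf.truncated_normal), so it will not run unmodified on TensorFlow 2.x. One common workaround, assuming a TF 2.x installation, is to import the v1 compatibility module in place of the plain import at the top of the script:

    # Replace `import tensorflow as tf` with the TF1 compatibility layer
    # when running under TensorFlow 2.x.
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()  # restore graph-mode semantics (tf.Session etc.)

The compat.v1 module also keeps the old names tf.truncated_normal and tf.random_normal, which were renamed to tf.random.truncated_normal and tf.random.normal in the 2.x API.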