| Views: 3714 | Replies: 6 |
(Only replies matching the selected filter are shown, so the floor numbers below are not consecutive.)
HZSte1204, New Bug (new to the forum)

[Help]
Help: AttributeError: module 'cs_gan.utils' has no attribute 'get_train_dataset'

Help, everyone: when I run the code I get AttributeError: module 'cs_gan.utils' has no attribute 'get_train_dataset'. The code contains from cs_gan import utils, and utils.py does define the function get_train_dataset. How can this be fixed? (Sent from the Muchong Android client)
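When Python raises AttributeError for a function that is plainly defined in the source file being read, the interpreter is usually importing a different copy of the module. A minimal diagnostic sketch, illustrative only; it assumes cs_gan is importable from wherever main_cs.py is run:

# Minimal diagnostic sketch: check which copy of cs_gan.utils Python loads.
from cs_gan import utils

print(utils.__file__)                        # path of the module actually imported
print(hasattr(utils, 'get_train_dataset'))   # False means a stale/older copy is loaded
print([n for n in dir(utils) if 'data' in n.lower()])  # what the loaded copy defines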
Floor 7 | 2020-04-22 15:46:16
HZSte1204
New Bug (new to the forum)
- Assists: 0 (Kindergarten)
- Gold coins: 44.6
- Posts: 26
- Online: 2.2 hours
- Member ID: 20042637
- Registered: 2019-12-06
- Major: Automation

Hi, expert: when I run the code I get AttributeError: module 'cs_gan.utils' has no attribute 'get_train_dataset'. The code contains from cs_gan import utils, and utils.py does define the function get_train_dataset. How should I fix this? Thanks. (Sent from the Muchong Android client)
Floor 2 | 2020-04-15 18:52:52
天天进步啊
Wood Bug (famous writer)
- Assists: 87 (Junior-high student)
- Gold coins: 6058.7
- Gold given away: 101
- Red flowers: 42
- Sofas: 9
- Posts: 1793
- Online: 1005.5 hours
- Member ID: 3689648
- Registered: 2015-02-15
- Gender: male
- Major: Theoretical and computational chemistry

One look says this is Python. Post your code first, then describe the error.
Floor 3 | 2020-04-15 20:41:22
HZSte1204, New Bug (new to the forum)
Gave one red flower
Hello! This is the open-source code for Yan Wu, Mihaela Rosca, Timothy Lillicrap, "Deep Compressed Sensing", ICML 2019 (the repo says: "This is the example code for the following ICML 2019 paper. If you use the code here please cite this paper"): https://github.com/deepmind/deepmind-research/tree/master/cs_gan. Below are the error message, the main_cs.py file that raises it, and the utils.py code.

  File "D:\deep compressed sensing\main_cs.py", line 76, in main
    images = utils.get_train_dataset(data_processor, FLAGS.dataset,
AttributeError: module 'cs_gan.utils' has no attribute 'get_train_dataset'

#main_cs.py
"""Training script."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

from cs_gan import cs
from cs_gan import file_utils
from cs_gan import utils

tfd = tfp.distributions

flags.DEFINE_string(
    'mode', 'recons', 'Model mode.')
flags.DEFINE_integer(
    'num_training_iterations', 10000000, 'Number of training iterations.')
flags.DEFINE_integer(
    'batch_size', 64, 'Training batch size.')
flags.DEFINE_integer(
    'num_measurements', 25, 'The number of measurements')
flags.DEFINE_integer(
    'num_latents', 100, 'The number of latents')
flags.DEFINE_integer(
    'num_z_iters', 3, 'The number of latent optimisation steps.')
flags.DEFINE_float(
    'z_step_size', 0.01, 'Step size for latent optimisation.')
flags.DEFINE_string(
    'z_project_method', 'norm', 'The method to project z.')
flags.DEFINE_integer(
    'summary_every_step', 1000, 'The interval at which to log debug ops.')
flags.DEFINE_integer(
    'export_every', 10, 'The interval at which to export samples.')
flags.DEFINE_string(
    'dataset', 'mnist', 'The dataset used for learning (cifar|mnist.')
flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate.')
flags.DEFINE_string(
    'output_dir', '/tmp/cs_gan/cs', 'Location where to save output files.')

FLAGS = flags.FLAGS

# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)


def main(argv):
  del argv

  utils.make_output_dir(FLAGS.output_dir)
  data_processor = utils.DataProcessor()
  images = utils.get_train_dataset(data_processor, FLAGS.dataset,
                                   FLAGS.batch_size)
  logging.info('Learning rate: %d', FLAGS.learning_rate)

  # Construct optimizers.
  optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)

  # Create the networks and models.
  generator = utils.get_generator(FLAGS.dataset)
  metric_net = utils.get_metric_net(FLAGS.dataset, FLAGS.num_measurements)
  model = cs.CS(metric_net, generator, FLAGS.num_z_iters, FLAGS.z_step_size,
                FLAGS.z_project_method)
  prior = utils.make_prior(FLAGS.num_latents)
  generator_inputs = prior.sample(FLAGS.batch_size)

  model_output = model.connect(images, generator_inputs)
  optimization_components = model_output.optimization_components
  debug_ops = model_output.debug_ops
  reconstructions, _ = utils.optimise_and_sample(
      generator_inputs, model, images, is_training=False)

  global_step = tf.train.get_or_create_global_step()
  update_op = optimizer.minimize(
      optimization_components.loss,
      var_list=optimization_components.vars,
      global_step=global_step)

  sample_exporter = file_utils.FileExporter(
      os.path.join(FLAGS.output_dir, 'reconstructions'))

  # Hooks.
  debug_ops['it'] = global_step
  # Abort training on Nans.
  nan_hook = tf.train.NanTensorHook(optimization_components.loss)
  # Step counter.
  step_conter_hook = tf.train.StepCounterHook()

  checkpoint_saver_hook = tf.train.CheckpointSaverHook(
      checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)

  loss_summary_saver_hook = tf.train.SummarySaverHook(
      save_steps=FLAGS.summary_every_step,
      output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
      summary_op=utils.get_summaries(debug_ops))

  hooks = [checkpoint_saver_hook, nan_hook, step_conter_hook,
           loss_summary_saver_hook]

  # Start training.
  with tf.train.MonitoredSession(hooks=hooks) as sess:
    logging.info('starting training')

    for i in range(FLAGS.num_training_iterations):
      sess.run(update_op)

      if i % FLAGS.export_every == 0:
        reconstructions_np, data_np = sess.run([reconstructions, images])
        # Create an object which gets data and does the processing.
        data_np = data_processor.postprocess(data_np)
        reconstructions_np = data_processor.postprocess(reconstructions_np)
        sample_exporter.save(reconstructions_np, 'reconstructions')
        sample_exporter.save(data_np, 'data')


if __name__ == '__main__':
  app.run(main)

#utils.py
"""Tools for latent optimisation."""
import collections
import os

from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

from cs_gan import nets

tfd = tfp.distributions


class ModelOutputs(
    collections.namedtuple('AdversarialModelOutputs',
                           ['optimization_components', 'debug_ops'])):
  """All the information produced by the adversarial module.

  Fields:
    * `optimization_components`: A dictionary. Each entry in this dictionary
      corresponds to a module to train using their own optimizer. The keys
      are names of the components, and the values are
      `common.OptimizationComponent` instances. The keys of this dict can be
      made keys of the configuration used by the main train loop, to define
      the configuration of the optimization details for each module.
    * `debug_ops`: A dictionary, from string to a scalar `tf.Tensor`.
      Quantities used for tracking training.
  """


class OptimizationComponent(
    collections.namedtuple('OptimizationComponent', ['loss', 'vars'])):
  """Information needed by the optimizer to train modules.

  Usage:
    `optimizer.minimize(opt_compoment.loss, var_list=opt_component.vars)`

  Fields:
    * `loss`: A `tf.Tensor` the loss of the module.
    * `vars`: A list of variables, the ones which will be used to minimize
      the loss.
  """


def cross_entropy_loss(logits, expected):
  """The cross entropy classification loss between logits and expected values.

  The loss proposed by the original GAN paper:
  https://arxiv.org/abs/1406.2661.

  Args:
    logits: a `tf.Tensor`, the model produced logits.
    expected: a `tf.Tensor`, the expected output.

  Returns:
    A scalar `tf.Tensor`, the average loss obtained on the given inputs.

  Raises:
    ValueError: if the logits do not have shape [batch_size, 2].
  """

  num_logits = logits.get_shape()[1]
  if num_logits != 2:
    raise ValueError(('Invalid number of logits for cross_entropy_loss! '
                      'cross_entropy_loss supports only 2 output logits!'))
  return tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=expected))


def optimise_and_sample(init_z, module, data, is_training):
  """Optimising generator latent variables and sample."""
  if module.num_z_iters == 0:
    z_final = init_z
  else:
    init_loop_vars = (0, _project_z(init_z, module.z_project_method))
    loop_cond = lambda i, _: i < module.num_z_iters

    def loop_body(i, z):
      loop_samples = module.generator(z, is_training)
      gen_loss = module.gen_loss_fn(data, loop_samples)
      z_grad = tf.gradients(gen_loss, z)[0]
      z -= module.z_step_size * z_grad
      z = _project_z(z, module.z_project_method)
      return i + 1, z

    # Use the following static loop for debugging
    # z = init_z
    # for _ in xrange(num_z_iters):
    #   _, z = loop_body(0, z)
    # z_final = z

    _, z_final = tf.while_loop(loop_cond, loop_body, init_loop_vars)
  return module.generator(z_final, is_training), z_final


def get_optimisation_cost(initial_z, optimised_z):
  optimisation_cost = tf.reduce_mean(
      tf.reduce_sum((optimised_z - initial_z)**2, -1))
  return optimisation_cost


def _project_z(z, project_method='clip'):
  """To be used for projected gradient descent over z."""
  if project_method == 'norm':
    z_p = tf.nn.l2_normalize(z, axis=-1)
  elif project_method == 'clip':
    z_p = tf.clip_by_value(z, -1, 1)
  else:
    raise ValueError('Unknown project_method: {}'.format(project_method))
  return z_p


class DataProcessor(object):

  def preprocess(self, x):
    return x * 2 - 1

  def postprocess(self, x):
    return (x + 1) / 2.


def _get_np_data(data_processor, dataset, split='train'):
  """Get the dataset as numpy arrays."""
  index = 0 if split == 'train' else 1
  if dataset == 'mnist':
    # Construct the dataset.
    x, _ = tf.keras.datasets.mnist.load_data()[index]
    # Note: tf dataset is binary so we convert it to float.
    x = x.astype(np.float32)
    x = x / 255.
    x = x.reshape((-1, 28, 28, 1))

  if dataset == 'cifar':
    x, _ = tf.keras.datasets.cifar10.load_data()[index]
    x = x.astype(np.float32)
    x = x / 255.

  if data_processor:
    # Normalize data if a processor is given.
    x = data_processor.preprocess(x)
  return x


def make_output_dir(output_dir):
  logging.info('Creating output dir %s', output_dir)
  if not tf.gfile.IsDirectory(output_dir):
    tf.gfile.MakeDirs(output_dir)


def get_ckpt_dir(output_dir):
  ckpt_dir = os.path.join(output_dir, 'ckpt')
  if not tf.gfile.IsDirectory(ckpt_dir):
    tf.gfile.MakeDirs(ckpt_dir)
  return ckpt_dir


def get_real_data_for_eval(num_eval_samples, dataset, split='valid'):
  data = _get_np_data(data_processor=None, dataset=dataset, split=split)
  data = data[:num_eval_samples]
  return tf.constant(data)


def get_summaries(ops):
  summaries = []
  for name, op in ops.items():
    # Ensure to log the value ops before writing them in the summary.
    # We do this instead of a hook to ensure IS/FID are never computed twice.
    print_op = tf.print(name, [op], output_stream=tf.logging.info)
    with tf.control_dependencies([print_op]):
      summary = tf.summary.scalar(name, op)
    summaries.append(summary)
  return summaries


def get_train_dataset(data_processor, dataset, batch_size):
  """Creates the training data tensors."""
  x_train = _get_np_data(data_processor, dataset, split='train')

  # Create the TF dataset.
  dataset = tf.data.Dataset.from_tensor_slices(x_train)

  # Shuffle and repeat the dataset for training.
  # This is required because we want to do multiple passes through the entire
  # dataset when training.
  dataset = dataset.shuffle(100000).repeat()

  # Batch the data and return the data batch.
  one_shot_iterator = dataset.batch(batch_size).make_one_shot_iterator()
  data_batch = one_shot_iterator.get_next()
  return data_batch


def get_generator(dataset):
  if dataset == 'mnist':
    return nets.MLPGeneratorNet()
  if dataset == 'cifar':
    return nets.SNGenNet()


def get_metric_net(dataset, num_outputs=2):
  if dataset == 'mnist':
    return nets.MLPMetricNet(num_outputs)
  if dataset == 'cifar':
    return nets.SNMetricNet(num_outputs)


def make_prior(num_latents):
  # Zero mean, unit variance prior.
  prior_mean = tf.zeros(shape=(num_latents), dtype=tf.float32)
  prior_scale = tf.ones(shape=(num_latents), dtype=tf.float32)

  return tfd.Normal(loc=prior_mean, scale=prior_scale)
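get_train_dataset is clearly defined in the utils.py pasted above, so the likely cause is that Python resolves cs_gan to a different directory than the one being edited, for example an older copy earlier on sys.path or stale __pycache__ bytecode; the traceback even shows main_cs.py running from D:\deep compressed sensing rather than from inside the repository checkout. A sketch of how one might verify and work around this; REPO_ROOT below is a hypothetical path, not taken from this thread:

# Sketch under stated assumptions: make the fresh checkout win over any stale
# cs_gan copy, then verify which file actually gets loaded.
import sys

REPO_ROOT = r'D:\deepmind-research'   # ASSUMPTION: where the repo was cloned
sys.path.insert(0, REPO_ROOT)         # search the checkout before anything else

from cs_gan import utils              # must come after the sys.path tweak

print(utils.__file__)                 # should point inside REPO_ROOT\cs_gan
if not hasattr(utils, 'get_train_dataset'):
    # Another cs_gan still shadows the checkout: inspect these paths for old
    # copies or __pycache__ folders and remove or uninstall them.
    for p in sys.path:
        print(p)

Equivalently, running the script as a package module from the checkout root (python -m cs_gan.main_cs) keeps from cs_gan import utils resolving into the same tree; that invocation is inferred from the repository layout on GitHub, not from anything stated in this thread.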
Floor 4 | 2020-04-18 12:23:05