ValueError: Error when checking input: expected conv2d_input to have 4 dimensions, but got array with shape (None, 1)


I finished training a model with 20 classes, reaching an accuracy of 0.9993, and I am now testing it. I followed this tutorial, but I ran into an error.

prediction = model.predict(['test1.jpg'])

The training data is defined as:

for features, label in training_data:
    x.append(features)
    y.append(label)

x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE,1)

Here is how I define the CNN:

x = pickle.load(open("x.pickle", "rb" ))
y = pickle.load(open("y.pickle", "rb"))

x = x/255.0

model = Sequential()
model.add(Conv2D(64,(3,3), input_shape = x.shape[1:IMG_SIZE]))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2,2)))

model.add(Conv2D(64,(3,3), input_shape  = x.shape[1:IMG_SIZE]))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2,2)))

model.add(Flatten())
model.add(Dense(64))

model.add(Dense(20))
model.add(Activation("sigmoid"))

Here is my model summary:

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 222, 222, 64)      640       
_________________________________________________________________
activation (Activation)      (None, 222, 222, 64)      0         
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 64)      0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 109, 109, 64)      36928     
_________________________________________________________________
activation_1 (Activation)    (None, 109, 109, 64)      0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 186624)            0         
_________________________________________________________________
dense (Dense)                (None, 64)                11944000  
_________________________________________________________________
dense_1 (Dense)              (None, 20)                1300      
_________________________________________________________________
activation_2 (Activation)    (None, 20)                0         
=================================================================
Total params: 11,982,868
Trainable params: 11,982,868
Non-trainable params: 0
_________________________________________________________________

The error message I get is:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
in
----> 1 prediction = model.predict(['test1.jpg'])

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
    907         max_queue_size=max_queue_size,
    908         workers=workers,
--> 909         use_multiprocessing=use_multiprocessing)
    910
    911   def reset_metrics(self):

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in predict(self, model, x, batch_size, verbose, steps, callbacks, **kwargs)
    460     return self._model_iteration(
    461         model, ModeKeys.PREDICT, x=x, batch_size=batch_size, verbose=verbose,
--> 462         steps=steps, callbacks=callbacks, **kwargs)
    463
    464

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _model_iteration(self, model, mode, x, y, batch_size, verbose, sample_weight, steps, callbacks, **kwargs)
    442           mode=mode,
    443           training_context=training_context,
--> 444           total_epochs=1)
    445       cbks.make_logs(model, epoch_logs, result, mode)
    446

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
    121         step=step, mode=mode, size=current_batch_size) as batch_logs:
    122       try:
--> 123         batch_outs = execution_function(iterator)
    124       except (StopIteration, errors.OutOfRangeError):
    125         # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in execution_function(input_fn)
     84     # numpy translates Tensors to values in Eager mode.
     85     return nest.map_structure(_non_none_constant_value,
---> 86                               distributed_function(input_fn))
     87
     88   return execution_function

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in __call__(self, *args, **kwds)
    455
    456     tracing_count = self._get_tracing_count()
--> 457     result = self._call(*args, **kwds)
    458     if tracing_count == self._get_tracing_count():
    459       self._call_counter.called_without_tracing()

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in _call(self, *args, **kwds)
    501       # This is the first call of __call__, so we have to initialize.
    502       initializer_map = object_identity.ObjectIdentityDictionary()
--> 503       self._initialize(args, kwds, add_initializers_to=initializer_map)
    504     finally:
    505       # At this point we know that the initialization is complete (or less

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
    406     self._concrete_stateful_fn = (
    407         self._stateful_fn._get_concrete_function_internal_garbage_collected(  # pylint: disable=protected-access
--> 408             *args, **kwds))
    409
    410     def invalid_creator_scope(*unused_args, **unused_kwds):

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
   1846     if self.input_signature:
   1847       args, kwargs = None, None
-> 1848     graph_function, _, _ = self._maybe_define_function(args, kwargs)
   1849     return graph_function
   1850

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\function.py in _maybe_define_function(self, args, kwargs)
   2148       graph_function = self._function_cache.primary.get(cache_key, None)
   2149       if graph_function is None:
-> 2150         graph_function = self._create_graph_function(args, kwargs)
   2151         self._function_cache.primary[cache_key] = graph_function
   2152         return graph_function, args, kwargs

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   2039             arg_names=arg_names,
   2040             override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041             capture_by_value=self._capture_by_value),
   2042         self._function_attributes,
   2043         # Tell the ConcreteFunction to clean up its graph once it goes out of

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    913                                           converted_func)
    914
--> 915       func_outputs = python_func(*func_args, **func_kwargs)
    916
    917       # invariant: func_outputs contains only Tensors, CompositeTensors,

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in wrapped_fn(*args, **kwds)
    356         # __wrapped__ allows AutoGraph to swap in a converted function. We give
    357         # the function a weak reference to itself to avoid a reference cycle.
--> 358         return weak_wrapped_fn().__wrapped__(*args, **kwds)
    359     weak_wrapped_fn = weakref.ref(wrapped_fn)
    360

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in distributed_function(input_iterator)
     71     strategy = distribution_strategy_context.get_strategy()
     72     outputs = strategy.experimental_run_v2(
---> 73         per_replica_function, args=(model, x, y, sample_weights))
     74     # Out of PerReplica outputs reduce or pick values to return.
     75     all_outputs = dist_utils.unwrap_output_dict(

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
    758       fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
    759                                 convert_by_default=False)
--> 760       return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    761
    762   def reduce(self, reduce_op, value, axis):

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
   1785       kwargs = {}
   1786     with self._container_strategy().scope():
-> 1787       return self._call_for_each_replica(fn, args, kwargs)
   1788
   1789   def _call_for_each_replica(self, fn, args, kwargs):

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
   2130         self._container_strategy(),
   2131         replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2132       return fn(*args, **kwargs)
   2133
   2134   def _reduce_to(self, reduce_op, value, destinations):

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\autograph\impl\api.py in wrapper(*args, **kwargs)
    290   def wrapper(*args, **kwargs):
    291     with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292       return func(*args, **kwargs)
    293
    294   if inspect.isfunction(func) or inspect.ismethod(func):

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in _predict_on_batch(failed resolving arguments)
    160   def _predict_on_batch(model, x, y=None, sample_weights=None):
    161     del y, sample_weights
--> 162     return predict_on_batch(model, x)
    163
    164   func = _predict_on_batch

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in predict_on_batch(model, x)
    357   # Validate and standardize user data.
    358   inputs, _, _ = model._standardize_user_data(
--> 359       x, extract_tensors_from_dataset=True)
    360
    361   # If model._distribution_strategy is True, then we are in a replica context

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
   2470           feed_input_shapes,
   2471           check_batch_axis=False,  # Don't enforce the batch size.
-> 2472           exception_prefix='input')
   2473
   2474     # Get typespecs for the input data and sanitize it if necessary.

~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
    563                            ': expected ' + names[i] + ' to have ' +
    564                            str(len(shape)) + ' dimensions, but got array '
--> 565                            'with shape ' + str(data_shape))
    566         if not check_batch_axis:
    567           data_shape = data_shape[1:]

ValueError: Error when checking input: expected conv2d_input to have 4 dimensions, but got array with shape (None, 1)

2 Answers

First, the input_shape of your first convolutional layer looks wrong. It should be:

input_shape = (IMG_SIZE, IMG_SIZE, 1)
model.add(Conv2D(64, (3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 1)))

Second, there is no need to specify input_shape for any of the intermediate layers, so

model.add(Conv2D(64, (3, 3), input_shape=x.shape[1:IMG_SIZE]))

should become

model.add(Conv2D(64, (3, 3)))
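
Putting both suggestions together, a minimal sketch of the corrected model definition could look like this (IMG_SIZE = 224 is an assumption, inferred from the (None, 222, 222, 64) output of the first conv layer in your summary):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Activation, MaxPool2D, Flatten, Dense

IMG_SIZE = 224  # assumption: inferred from the model summary above

model = Sequential()
# Only the first layer needs input_shape, including the explicit channel dimension.
model.add(Conv2D(64, (3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 1)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))

# Later layers infer their input shape from the previous layer.
model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))

model.add(Dense(20))
model.add(Activation("sigmoid"))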

I made the changes you suggested, but I still get the same error. - Kim404


You are trying to run a prediction on a list of strings, ['test1.jpg']. Why? You need to prepare data with the same shape and distribution as the data you trained on: for example, load the image, resize/crop it to the relevant size, normalize it to the [0, 1] range (if that is what you did during training), and so on...
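
For example, here is a minimal sketch of that preparation, assuming the training images were loaded as grayscale and resized to IMG_SIZE x IMG_SIZE (the cv2 calls and IMG_SIZE = 224 are assumptions based on the pipeline shown in the question):

import cv2
import numpy as np

IMG_SIZE = 224  # assumption: must match the size used for the training data

# Prepare the test image the same way as the training data:
# grayscale, resized to IMG_SIZE x IMG_SIZE, scaled to [0, 1].
img = cv2.imread('test1.jpg', cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
img = img.reshape(-1, IMG_SIZE, IMG_SIZE, 1) / 255.0  # shape: (1, IMG_SIZE, IMG_SIZE, 1)

prediction = model.predict(img)
print(np.argmax(prediction))  # index of the most likely of the 20 classes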


Yes, I just realized I hadn't prepared the data the way you described. Thank you for spotting that for me! - Kim404
