experiment_name: "coco_test"
model_options {
  # Update the path to the initial checkpoint (e.g., ImageNet
  # pretrained checkpoint).
  initial_checkpoint: "/home/caixiaoni/Desktop/project/resnet50_os16_panoptic_deeplab_coco_train/ckpt-200000"
  backbone {
    name: "resnet50"
    output_stride: 16
  }
  decoder {
    feature_key: "res5"
    decoder_channels: 256
    aspp_channels: 256
    atrous_rates: 6
    atrous_rates: 12
    atrous_rates: 18
  }
  panoptic_deeplab {
    low_level {
      feature_key: "res3"
      channels_project: 64
    }
    low_level {
      feature_key: "res2"
      channels_project: 32
    }
    instance {
      low_level_override {
        feature_key: "res3"
        channels_project: 32
      }
      low_level_override {
        feature_key: "res2"
        channels_project: 16
      }
      instance_decoder_override {
        feature_key: "res5"
        decoder_channels: 128
        atrous_rates: 6
        atrous_rates: 12
        atrous_rates: 18
      }
      center_head {
        output_channels: 1
        head_channels: 32
      }
      regression_head {
        output_channels: 2
        head_channels: 32
      }
    }
    semantic_head {
      output_channels: 134
      head_channels: 256
    }
  }
}
trainer_options {
  save_checkpoints_steps: 1000
  save_summaries_steps: 100
  steps_per_loop: 100
  loss_options {
    semantic_loss {
      name: "softmax_cross_entropy"
      weight: 1.0
      top_k_percent: 0.2
    }
    center_loss {
      name: "mse"
      weight: 200
    }
    regression_loss {
      name: "l1"
      weight: 0.01
    }
  }
  solver_options {
    base_learning_rate: 0.0005
    training_number_of_steps: 200000
    warmup_steps: 2000
  }
}
train_dataset_options {
  dataset: "metal_part"
  # Update the path to training set.
  file_pattern: "/home/caixiaoni/Desktop/project/part-TFRecord/train-*.tfrecord"
  # Adjust the batch_size accordingly to better fit your GPU/TPU memory.
  # Also see Q1 in g3doc/faq.md.
  batch_size: 16
  crop_size: 513
  crop_size: 513
  min_resize_value: 513
  max_resize_value: 513
  augmentations {
    min_scale_factor: 0.5
    max_scale_factor: 1.5
    scale_factor_step_size: 0.1
    autoaugment_policy_name: "simple_classification_policy_magnitude_scale_0.2"
  }
  increase_small_instance_weights: true
  small_instance_weight: 3.0
}
eval_dataset_options {
  dataset: "metal_part"
  # Update the path to validation set.
  file_pattern: "/home/caixiaoni/Desktop/project/part-TFRecord/val-*.tfrecord"
  batch_size: 1
  crop_size: 513
  crop_size: 513
  min_resize_value: 513
  max_resize_value: 513
  # Add options to make the evaluation loss comparable to the training loss.
  increase_small_instance_weights: true
  small_instance_weight: 3.0
}
evaluator_options {
  continuous_eval_timeout: -1
  stuff_area_limit: 4096
  center_score_threshold: 0.1
  nms_kernel: 41
  save_predictions: true
  save_raw_predictions: false
  # Use pure tf functions (i.e., no CUDA kernel) to merge semantic and
  # instance maps. For faster speed, compile TensorFlow with provided kernel
  # implementation under the folder `tensorflow_ops`, and set
  # merge_semantic_and_instance_with_tf_op to true.
  merge_semantic_and_instance_with_tf_op: false
}
I am currently using Ubuntu 18.04. I have also run the same code (only with different paths) on macOS, where the config file is read without any error, but on Ubuntu it fails. Since my Mac has no NVIDIA GPU, I can't train there anyway.
I assume the problem is in the config file. Could anyone give me some suggestions? I'd appreciate any hints! Thanks in advance!
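To see exactly where (and whether) the parser chokes on each system, the config can be parsed in isolation. Below is a minimal sketch, assuming deeplab2 is on the PYTHONPATH with its protos already compiled via protoc (per deeplab2's installation guide); the file name coco_test.textproto is a placeholder for wherever this config is saved:

# Minimal sketch: parse the experiment config on its own.
# Assumes deeplab2 is importable and its .proto files were compiled
# with protoc, so that deeplab2.config_pb2 exists.
from google.protobuf import text_format
from deeplab2 import config_pb2

# 'coco_test.textproto' is a placeholder for the actual config path.
with open('coco_test.textproto', 'r') as f:
    config = text_format.Parse(f.read(), config_pb2.ExperimentOptions())

print(config.experiment_name)  # prints "coco_test" on success

On failure, text_format raises a ParseError that reports the offending line and column, which makes it easy to compare the two machines. If the copy on macOS parses and the copy on Ubuntu does not, diffing the raw bytes of the two files would show whether the files themselves actually differ.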