I trained Yolo v3 using my own data, but prediction shows classes I did not train

I am trying to build a Yolo v3 model that detects each color of traffic light.

So I started training Yolo v3 on my own traffic-light data.

I set up six classes: red light, green light, yellow light, left turn, straight, and right turn.
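
(For reference, darknet expects one .txt label file per training image, with one object per line in the form `<class_id> <x_center> <y_center> <width> <height>`, all normalized to the image size. An illustrative example with made-up values:)

    0 0.512 0.231 0.030 0.075
    3 0.488 0.240 0.028 0.070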

After training, I ran the Yolo v3 detector with my own weights and cfg file.
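
(For reference, the usual darknet detection command looks like this; `cfg/obj.data`, `yolov3-custom.cfg`, and the weights path are placeholder names, not necessarily the ones used here:)

    ./darknet detector test cfg/obj.data cfg/yolov3-custom.cfg backup/yolov3-custom_final.weights data/test.jpg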

But the prediction results were unexpected!

The predicted classes have nothing to do with the ones I set, for example car, aeroplane, motorbike...

What is wrong here?

I am using Ubuntu 16.04 LTS with a GeForce RTX 2080, together with OpenCV, CUDA, and cuDNN, so the darknet Makefile is set like this:

    GPU=1
    CUDNN=1
    OPENCV=1
    OPENMP=0
    DEBUG=0
    ...
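
(After changing these flags, darknet has to be recompiled for them to take effect:)

    make clean
    make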

I used the pretrained weights darknet19_448.conv.23; I heard they are required.
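
(Training with pretrained weights uses the standard darknet command, roughly as follows; `cfg/obj.data` and `yolov3-custom.cfg` are placeholder names:)

    ./darknet detector train cfg/obj.data cfg/yolov3-custom.cfg darknet19_448.conv.23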

Here is the cfg file I set up for training:

    [net]
    # Testing
    #batch=1
    #subdivisions=1
    # Training
    batch=24
    subdivisions=8
    width=416
    height=416
    channels=3
    momentum=0.9
    decay=0.0005
    angle=0
    saturation = 1.5
    exposure = 1.5
    hue=.1

    learning_rate=0.001
    burn_in=1000
    max_batches = 20000
    policy=steps
    steps=400000,450000
    scales=.1,.1

    [convolutional]
    batch_normalize=1
    filters=32
    size=3
    stride=1
    pad=1
    activation=leaky

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=32
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=64
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=64
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear


    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    # Downsample

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=2
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=1024
    size=3
    stride=1
    pad=1
    activation=leaky

    [shortcut]
    from=-3
    activation=linear

    ######################

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=512
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=1024
    activation=leaky

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear


    [yolo]
    mask = 6,7,8
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .7
    truth_thresh = 1
    random=1


    [route]
    layers = -4

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [upsample]
    stride=2

    [route]
    layers = -1, 61



    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=256
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=512
    activation=leaky

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear


    [yolo]
    mask = 3,4,5
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .7
    truth_thresh = 1
    random=1



    [route]
    layers = -4

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [upsample]
    stride=2

    [route]
    layers = -1, 36



    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky

    [convolutional]
    batch_normalize=1
    filters=128
    size=1
    stride=1
    pad=1
    activation=leaky

    [convolutional]
    batch_normalize=1
    size=3
    stride=1
    pad=1
    filters=256
    activation=leaky

    [convolutional]
    size=1
    stride=1
    pad=1
    filters=33
    activation=linear


    [yolo]
    mask = 0,1,2
    anchors = 10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
    classes=6
    num=9
    jitter=.3
    ignore_thresh = .7
    truth_thresh = 1
    random=1

I would be grateful if you could answer.

Which repository are you using?

There should also be a .data and a .names file that need to be changed. Take this page as an example. You need to change the number of filters, the number of classes, and the class names before training your model. The model you are using may already have been pretrained on some other dataset for vehicle detection.
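
For example, a minimal .data file for six classes could look like this (all file names are illustrative):

    classes = 6
    train = data/train.txt
    valid = data/valid.txt
    names = data/obj.names
    backup = backup/

The .names file it points to lists one class name per line, in the same order as the class IDs used in the annotations:

    red light
    green light
    yellow light
    left turn
    straight
    right turn

One more thing to check: in the original pjreddie darknet, the shorthand `./darknet detect` command hard-codes cfg/coco.data, so it always prints COCO class names such as car and aeroplane; to get custom classes you must use the full `./darknet detector test <your .data> <your .cfg> <your .weights> <image>` form. Also, the filters value of the [convolutional] layer directly before each [yolo] layer must be (classes + 5) * 3, which is 33 for six classes, as in the cfg above.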