Finally found a way to identify objects in a picture. The neural network that does this is called YOLO ("You Only Look Once"). Here is a blog post about how to use YOLO in R.

[Object detection in just 3 lines of R code using Tiny YOLO](https://heartbeat.fritz.ai/object-detection-in-just-3-lines-of-r-code-using-tiny-yolo-b5a16e50e8a0)

I used the devtools install given in the blog post and it worked.

```{r, eval=FALSE}
devtools::install_github("bnosac/image", subdir = "image.darknet", build_vignettes = TRUE)
```

```{r}
library(image.darknet)
```
```{r}
# Set up the pre-trained Tiny YOLO VOC model that ships with image.darknet
yolo_tiny_voc <- image_darknet_model(type = 'detect', model = "tiny-yolo-voc.cfg", 
        weights = system.file(package="image.darknet", "models", "tiny-yolo-voc.weights"), 
        labels = system.file(package="image.darknet", "include", "darknet", "data", "voc.names"))

# Run detection; threshold is the minimum confidence for a box to be reported
x <- image_darknet_detect(file = "/home/esuess/classes/2018-2019/02 - Spring 2019/Stat654/Final/google_car.png", 
                          object = yolo_tiny_voc,
                          threshold = 0.19)
```

```
layer     filters    size              input                output
    0 conv     16  3 x 3 / 1   416 x 416 x   3   ->   416 x 416 x  16
    1 max          2 x 2 / 2   416 x 416 x  16   ->   208 x 208 x  16
    2 conv     32  3 x 3 / 1   208 x 208 x  16   ->   208 x 208 x  32
    3 max          2 x 2 / 2   208 x 208 x  32   ->   104 x 104 x  32
    4 conv     64  3 x 3 / 1   104 x 104 x  32   ->   104 x 104 x  64
    5 max          2 x 2 / 2   104 x 104 x  64   ->    52 x  52 x  64
    6 conv    128  3 x 3 / 1    52 x  52 x  64   ->    52 x  52 x 128
    7 max          2 x 2 / 2    52 x  52 x 128   ->    26 x  26 x 128
    8 conv    256  3 x 3 / 1    26 x  26 x 128   ->    26 x  26 x 256
    9 max          2 x 2 / 2    26 x  26 x 256   ->    13 x  13 x 256
   10 conv    512  3 x 3 / 1    13 x  13 x 256   ->    13 x  13 x 512
   11 max          2 x 2 / 1    13 x  13 x 512   ->    13 x  13 x 512
   12 conv   1024  3 x 3 / 1    13 x  13 x 512   ->    13 x  13 x1024
   13 conv   1024  3 x 3 / 1    13 x  13 x1024   ->    13 x  13 x1024
   14 conv    125  1 x 1 / 1    13 x  13 x1024   ->    13 x  13 x 125
   15 detection
Loading weights from /home/esuess/R/x86_64-pc-linux-gnu-library/3.4/image.darknet/models/tiny-yolo-voc.weights...Done!
/home/esuess/classes/2018-2019/02 - Spring 2019/Stat654/Final/google_car.png: Predicted in 2.440059 seconds.
Boxes: 845 of which 4 above the threshold.
person: 24%
car: 96%
person: 40%
bicycle: 50%
```
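The last layers explain the numbers in the output: Tiny YOLO VOC predicts on a 13 x 13 grid with 5 anchor boxes per cell, so there are 13 x 13 x 5 = 845 candidate boxes, and the final 1 x 1 convolution has 5 x (5 + 20) = 125 filters for the 20 VOC classes. With the low threshold of 0.19, even the weak 24% person box is kept. darknet writes an annotated copy of the image to predictions.png, which can be displayed in the notebook:

```{r, echo=FALSE}
knitr::include_graphics('/home/esuess/classes/2018-2019/02 - Spring 2019/Stat654/Final/predictions.png')
```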

```{r}
# Second image, with a slightly higher confidence threshold
x <- image_darknet_detect(file = "/home/esuess/classes/2018-2019/02 - Spring 2019/Stat654/Final/busax.jpg", 
                          object = yolo_tiny_voc,
                          threshold = 0.25)
```

```
Loading weights from /home/esuess/R/x86_64-pc-linux-gnu-library/3.4/image.darknet/models/tiny-yolo-voc.weights...Done!
/home/esuess/classes/2018-2019/02 - Spring 2019/Stat654/Final/busax.jpg: Predicted in 2.479083 seconds.
Boxes: 845 of which 4 above the threshold.
train: 73%
bus: 28%
person: 86%
person: 55%
```
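Again, the boxed predictions are written to predictions.png:

```{r, echo=FALSE}
knitr::include_graphics('/home/esuess/classes/2018-2019/02 - Spring 2019/Stat654/Final/predictions.png')
```

Tiny YOLO is less sure about this image: the bus shows up as a train at 73% and as a bus at only 28%, while the two people are found at 86% and 55%.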
