diff --git a/tank/all_models.csv b/tank/all_models.csv
index 81ce5208..41bf9fb8 100644
--- a/tank/all_models.csv
+++ b/tank/all_models.csv
@@ -35,18 +35,12 @@ squeezenet1_0,linalg,torch,1e-2,1e-3,default,nhcw-nhwc,False,False,False,"","macos"
 wide_resnet50_2,linalg,torch,1e-2,1e-3,default,nhcw-nhwc/img2col,False,False,False,"","macos"
 efficientnet-v2-s,mhlo,tf,1e-02,1e-3,default,nhcw-nhwc,False,False,False,"","macos"
 mnasnet1_0,linalg,torch,1e-2,1e-3,default,nhcw-nhwc,True,True,True,"","macos"
-t5-base,linalg,torch,1e-2,1e-3,default,None,False,False,False,"",""
+t5-base,linalg,torch,1e-2,1e-3,default,None,True,True,True,"",""
 t5-base,mhlo,tf,1e-2,1e-3,default,None,False,False,False,"",""
-t5-large,linalg,torch,1e-2,1e-3,default,None,False,False,False,"",""
+t5-large,linalg,torch,1e-2,1e-3,default,None,True,True,True,"",""
 t5-large,mhlo,tf,1e-2,1e-3,default,None,False,False,False,"",""
-efficientnet_b0,linalg,torch,1e-2,1e-3,default,nhcw-nhwc,False,False,False,"",""
+efficientnet_b0,linalg,torch,1e-2,1e-3,default,nhcw-nhwc,False,True,False,"",""
 efficientnet_b7,linalg,torch,1e-2,1e-3,default,nhcw-nhwc,False,False,False,"",""
 efficientnet_b0,mhlo,tf,1e-2,1e-3,default,None,nhcw-nhwc,False,False,False,"",""
 efficientnet_b7,mhlo,tf,1e-2,1e-3,default,None,nhcw-nhwc,False,False,False,"",""
-efficientnet_b0,mhlo,tf,1e-2,1e-3,default,None,nhcw-nhwc,False,False,"",""
-efficientnet_b7,mhlo,tf,1e-2,1e-3,default,None,nhcw-nhwc,False,False,"",""
 gpt2,mhlo,tf,1e-2,1e-3,default,None,False,False,False,"",""
-t5-base,linalg,torch,1e-2,1e-3,default,None,False,False,False,"",""
-t5-base,mhlo,tf,1e-2,1e-3,default,None,False,False,False,"",""
-t5-large,linalg,torch,1e-2,1e-3,default,None,False,False,False,"",""
-t5-large,mhlo,tf,1e-2,1e-3,default,None,False,False,False,"",""
diff --git a/tank/torch_model_list.csv b/tank/torch_model_list.csv
index 3dab8ad2..b3ce59bb 100644
--- a/tank/torch_model_list.csv
+++ b/tank/torch_model_list.csv
@@ -19,9 +19,5 @@ mnasnet1_0,False,vision,True,-,"cnn, torchvision, mobile, architecture-search","
 resnet50_fp16,False,vision,True,23M,"cnn,image-classification,residuals,resnet-variant","Bottlenecks with only conv2d (1x1 conv -> 3x3 conv -> 1x1 conv blocks)"
 bert-base-uncased_fp16,True,fp16,False,109M,"nlp;bert-variant;transformer-encoder","12 layers; 768 hidden; 12 attention heads"
 bert-large-uncased,True,hf,True,330M,"nlp;bert-variant;transformer-encoder","24 layers, 1024 hidden units, 16 attention heads"
-t5-base,True,hf_seq2seq,True,220M,"nlp;transformer-encoder;transformer-decoder","Text-to-Text Transfer Transformer"
-t5-large,True,hf_seq2seq,True,770M,"nlp;transformer-encoder;transformer-decoder","Text-to-Text Transfer Transformer"
 efficientnet_b0,True,vision,False,5.3M,"image-classification;cnn;conv2d;depthwise-conv","Smallest EfficientNet variant with 224x224 input"
 efficientnet_b7,True,vision,False,66M,"image-classification;cnn;conv2d;depthwise-conv","Largest EfficientNet variant with 600x600 input"
-t5-base,True,hf_seq2seq,True,220M,"nlp;transformer-encoder;transformer-decoder","Text-to-Text Transfer Transformer"
-t5-large,True,hf_seq2seq,True,770M,"nlp;transformer-encoder;transformer-decoder","Text-to-Text Transfer Transformer"