can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention"))  # not everyone has torch 2.x to use sdp
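
# Illustrative sketch (not part of the original file): one way a flag like
# can_use_sdp could gate which attention path is taken. The function below
# and its tensor shapes are hypothetical, not the project's actual code.
def _attention_sketch(q, k, v):
    if can_use_sdp:
        # torch 2.x fused kernel
        return torch.nn.functional.scaled_dot_product_attention(q, k, v)
    # manual fallback for older torch: softmax(QK^T / sqrt(d)) V
    scores = q @ k.transpose(-2, -1) / (q.shape[-1] ** 0.5)
    return scores.softmax(dim=-1) @ v
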
parser.add_argument("--opt-split-attention-invokeai",action='store_true',help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1",action='store_true',help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-nan-check",action='store_true',help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu",nargs='+',help="use CPU as torch device for specified modules",default=[],type=str.lower)