"""Launcher for Sherlock DSG training.

Runs ``train`` with the eager attention implementation. A
flash-attention-2 path (with a ``spawn`` multiprocessing start method
and an eager fallback) was previously sketched here in commented-out
form; it is intentionally not active.
"""

from ola_vlm.train.sherlock_dsg_train import train

# Retained for the (currently disabled) spawn start-method setup;
# not used on the active code path.
import torch.multiprocessing as mp

if __name__ == "__main__":
    # NOTE(review): the original file carried commented-out code that set
    # mp.set_start_method('spawn') and tried
    # train(attn_implementation="flash_attention_2") with a bare-except
    # fallback to eager. Eager is used unconditionally here — confirm with
    # the training team before re-enabling the flash-attention path.
    train(attn_implementation="eager")