Saving weights and logs of step 1000
- events.out.tfevents.1642691672.t1v-n-00e295a4-w-0.2612942.0.v2 +3 -0
- flax_model.msgpack +1 -1
- wandb/debug-internal.log +1 -1
- wandb/debug.log +1 -1
- wandb/latest-run +1 -1
- wandb/run-20220120_135649-r5vgtpr6/files/config.yaml +3 -0
- wandb/run-20220120_135649-r5vgtpr6/files/output.log +507 -0
- wandb/run-20220120_135649-r5vgtpr6/files/wandb-summary.json +1 -1
- wandb/run-20220120_135649-r5vgtpr6/logs/debug-internal.log +0 -0
- wandb/run-20220120_135649-r5vgtpr6/logs/debug.log +107 -0
- wandb/run-20220120_135649-r5vgtpr6/run-r5vgtpr6.wandb +0 -0
- wandb/run-20220120_151428-zjqubvsf/files/code/run_mlm_flax.py +815 -0
- wandb/run-20220120_151428-zjqubvsf/files/config.yaml +147 -0
- wandb/run-20220120_151428-zjqubvsf/files/diff.patch +0 -0
- wandb/run-20220120_151428-zjqubvsf/files/events.out.tfevents.1642691672.t1v-n-00e295a4-w-0.2612942.0.v2 +1 -0
- wandb/run-20220120_151428-zjqubvsf/files/output.log +872 -0
- wandb/run-20220120_151428-zjqubvsf/files/requirements.txt +122 -0
- wandb/run-20220120_151428-zjqubvsf/files/wandb-metadata.json +48 -0
- wandb/run-20220120_151428-zjqubvsf/files/wandb-summary.json +1 -0
- wandb/run-20220120_151428-zjqubvsf/logs/debug-internal.log +0 -0
- wandb/run-20220120_151428-zjqubvsf/logs/debug.log +26 -0
- wandb/run-20220120_151428-zjqubvsf/run-zjqubvsf.wandb +0 -0
events.out.tfevents.1642691672.t1v-n-00e295a4-w-0.2612942.0.v2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e73a337f3e7c0b40bc214faf98558dd143f10d4116096f86a80a17ecea05d78
+size 147136
flax_model.msgpack
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8d8b6d4f550a1f9fb5de2517c1f10b4896c371caf13c880fb77e5413732f1fee
 size 498796983
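The model weights are tracked with Git LFS, so this commit only rewrites the small pointer file shown above; the 498 MB msgpack blob itself lives in LFS storage. As an aside (not part of the commit), a minimal Python sketch of how the three-line pointer format (version / oid / size) can be parsed:

# Illustrative sketch only: parse the Git LFS pointer text shown in the diff above.
def parse_lfs_pointer(text: str) -> dict:
    """Split each 'key value' line of a Git LFS pointer file into a dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:8d8b6d4f550a1f9fb5de2517c1f10b4896c371caf13c880fb77e5413732f1fee\n"
    "size 498796983\n"
)
fields = parse_lfs_pointer(pointer_text)
print(fields["oid"], int(fields["size"]))  # sha256:8d8b... 498796983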
wandb/debug-internal.log
CHANGED
@@ -1 +1 @@
-run-
+run-20220120_151428-zjqubvsf/logs/debug-internal.log
wandb/debug.log
CHANGED
@@ -1 +1 @@
-run-
+run-20220120_151428-zjqubvsf/logs/debug.log
wandb/latest-run
CHANGED
@@ -1 +1 @@
-run-
+run-20220120_151428-zjqubvsf
wandb/run-20220120_135649-r5vgtpr6/files/config.yaml
CHANGED
@@ -15,6 +15,9 @@ _wandb:
 1:
 - 11
 - 12
+2:
+- 11
+- 12
 4: 3.8.10
 5: 0.12.9
 6: 4.16.0.dev0
wandb/run-20220120_135649-r5vgtpr6/files/output.log
CHANGED
@@ -1691,3 +1691,510 @@ Training...: 4%|████████▎
+[context and 490 blank progress-bar lines omitted]
+Step... (2000 | Loss: 1.330615520477295, Acc: 0.7090045213699341): 0%| | 0/3 [1:15:55<?, ?it/s]
+Traceback (most recent call last):
+  File "run_mlm_flax.py", line 815, in <module>
+    main()
+  File "run_mlm_flax.py", line 718, in main
+    samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
+  File "run_mlm_flax.py", line 718, in <listcomp>
+    samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
+  File "/data/flax/lib/python3.8/site-packages/jax/_src/device_array.py", line 247, in <genexpr>
+    return (sl for chunk in self._chunk_iter(100) for sl in chunk._unstack())
+  File "/data/flax/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 6792, in _chunk_iter
+    for i in range(num_chunks):
+  File "/usr/lib/python3.8/functools.py", line 399, in _method
+    return self.func(cls_or_self, *self.args, *args, **keywords)
+  File "/data/flax/lib/python3.8/site-packages/jax/_src/device_array.py", line 41, in _forward_method
+    return fun(getattr(self, attrname), *args)
+KeyboardInterrupt
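The run was interrupted by hand (KeyboardInterrupt; the debug log below records exit code 255) while the training loop was gathering one batch of samples at run_mlm_flax.py line 718. For reference, an illustrative sketch of that gather with toy stand-ins for the real objects (the data and names below are hypothetical, not from the run):

# Toy stand-ins for tokenized_datasets["train"] and one entry of train_batch_idx.
import jax.numpy as jnp
import numpy as np

train_split = [{"input_ids": [i, i + 1, i + 2]} for i in range(100)]  # hypothetical data
batch_idx = jnp.array([3, 17, 42])  # one batch of shuffled sample indices

# The interrupted line iterates the JAX array directly, which goes through the
# chunked device-array unstacking visible in the traceback above. Converting the
# indices to a host-side NumPy array first keeps this gather on the host.
samples = [train_split[int(idx)] for idx in np.asarray(batch_idx)]
print(len(samples))  # 3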
wandb/run-20220120_135649-r5vgtpr6/files/wandb-summary.json
CHANGED
@@ -1 +1 @@
-{}
+{"_wandb": {"runtime": 4565}}
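The summary now records the run's wall-clock runtime: 4565 seconds, about 76 minutes, which matches the 1:15:55 elapsed time shown in the interrupted progress bar above. A trivial sketch of reading it back, assuming the file path used in this repository:

# Illustrative only: read the runtime recorded in the run's summary file.
import json

with open("wandb/run-20220120_135649-r5vgtpr6/files/wandb-summary.json") as f:
    summary = json.load(f)
print(summary["_wandb"]["runtime"] / 60)  # runtime in minutes (~76)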
wandb/run-20220120_135649-r5vgtpr6/logs/debug-internal.log
CHANGED
The diff for this file is too large to render.
wandb/run-20220120_135649-r5vgtpr6/logs/debug.log
CHANGED
@@ -23,3 +23,110 @@ config: {}
 2022-01-20 13:56:50,690 INFO MainThread:2512121 [wandb_run.py:_config_callback():956] config_cb None None {'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'per_device_train_batch_size': 46, 'per_device_eval_batch_size': 46, 'learning_rate': 0.00015, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.98, 'adam_epsilon': 1e-06, 'adafactor': False, 'num_train_epochs': 3.0, 'warmup_steps': 1000, 'logging_steps': 1000, 'save_steps': 1000, 'eval_steps': 1000, 'seed': 42, 'push_to_hub': True, 'hub_model_id': None, 'hub_token': None}
 2022-01-20 13:56:50,691 INFO MainThread:2512121 [wandb_run.py:_config_callback():956] config_cb None None {'model_name_or_path': 'versae/roberta-base-ncc', 'model_type': 'roberta', 'config_name': './', 'tokenizer_name': './', 'cache_dir': None, 'use_fast_tokenizer': True, 'dtype': 'bfloat16'}
 2022-01-20 13:56:50,691 INFO MainThread:2512121 [wandb_run.py:_config_callback():956] config_cb None None {'dataset_name': 'NbAiLab/NCC', 'dataset_config_name': None, 'train_file': None, 'validation_file': None, 'train_ref_file': None, 'validation_ref_file': None, 'overwrite_cache': False, 'validation_split_percentage': 5, 'max_seq_length': 512, 'preprocessing_num_workers': None, 'mlm_probability': 0.15, 'pad_to_max_length': True, 'line_by_line': False}
+2022-01-20 15:12:53,717 INFO MainThread:2512121 [wandb_run.py:_atexit_cleanup():1780] got exitcode: 255
+2022-01-20 15:12:53,718 INFO MainThread:2512121 [wandb_run.py:_restore():1752] restore
+2022-01-20 15:12:55,905 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 2
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 37926
+  total_bytes: 37926
+}
+
+2022-01-20 15:12:56,103 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 2
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 37926
+  total_bytes: 37926
+}
+
+2022-01-20 15:12:56,618 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 37926
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:56,721 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 37926
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:56,823 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:56,925 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:57,027 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:57,129 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:57,231 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:57,714 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+
+2022-01-20 15:12:58,080 INFO MainThread:2512121 [wandb_run.py:_wait_for_finish():1912] got exit ret: done: true
+exit_result {
+}
+file_counts {
+  wandb_count: 6
+  other_count: 1
+}
+pusher_stats {
+  uploaded_bytes: 47361
+  total_bytes: 47361
+}
+local_info {
+}
+
+2022-01-20 15:12:59,238 INFO MainThread:2512121 [wandb_run.py:_append_files():2180] logging synced files
wandb/run-20220120_135649-r5vgtpr6/run-r5vgtpr6.wandb
CHANGED
Binary files a/wandb/run-20220120_135649-r5vgtpr6/run-r5vgtpr6.wandb and b/wandb/run-20220120_135649-r5vgtpr6/run-r5vgtpr6.wandb differ
wandb/run-20220120_151428-zjqubvsf/files/code/run_mlm_flax.py
ADDED
@@ -0,0 +1,815 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# coding=utf-8
|
3 |
+
# Copyright 2021 The HuggingFace Team All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
"""
|
17 |
+
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a
|
18 |
+
text file or a dataset.
|
19 |
+
|
20 |
+
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
|
21 |
+
https://huggingface.co/models?filter=fill-mask
|
22 |
+
"""
|
23 |
+
import json
|
24 |
+
import logging
|
25 |
+
import math
|
26 |
+
import os
|
27 |
+
import sys
|
28 |
+
import time
|
29 |
+
from dataclasses import asdict, dataclass, field
|
30 |
+
from enum import Enum
|
31 |
+
from itertools import chain
|
32 |
+
|
33 |
+
# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
|
34 |
+
from pathlib import Path
|
35 |
+
from typing import Dict, List, Optional, Tuple
|
36 |
+
|
37 |
+
import numpy as np
|
38 |
+
from datasets import load_dataset
|
39 |
+
from tqdm import tqdm
|
40 |
+
|
41 |
+
import flax
|
42 |
+
import jax
|
43 |
+
import jax.numpy as jnp
|
44 |
+
import optax
|
45 |
+
from flax import jax_utils, traverse_util
|
46 |
+
from flax.training import train_state
|
47 |
+
from flax.training.common_utils import get_metrics, onehot, shard
|
48 |
+
from huggingface_hub import Repository
|
49 |
+
from transformers import (
|
50 |
+
CONFIG_MAPPING,
|
51 |
+
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
|
52 |
+
AutoConfig,
|
53 |
+
AutoTokenizer,
|
54 |
+
FlaxAutoModelForMaskedLM,
|
55 |
+
HfArgumentParser,
|
56 |
+
PreTrainedTokenizerBase,
|
57 |
+
TensorType,
|
58 |
+
is_tensorboard_available,
|
59 |
+
set_seed,
|
60 |
+
)
|
61 |
+
from transformers.file_utils import get_full_repo_name
|
62 |
+
|
63 |
+
|
64 |
+
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
|
65 |
+
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
|
66 |
+
|
67 |
+
|
68 |
+
@dataclass
|
69 |
+
class TrainingArguments:
|
70 |
+
output_dir: str = field(
|
71 |
+
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
|
72 |
+
)
|
73 |
+
overwrite_output_dir: bool = field(
|
74 |
+
default=False,
|
75 |
+
metadata={
|
76 |
+
"help": (
|
77 |
+
"Overwrite the content of the output directory. "
|
78 |
+
"Use this to continue training if output_dir points to a checkpoint directory."
|
79 |
+
)
|
80 |
+
},
|
81 |
+
)
|
82 |
+
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
|
83 |
+
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
|
84 |
+
per_device_train_batch_size: int = field(
|
85 |
+
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
|
86 |
+
)
|
87 |
+
per_device_eval_batch_size: int = field(
|
88 |
+
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
|
89 |
+
)
|
90 |
+
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
|
91 |
+
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
|
92 |
+
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
|
93 |
+
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
|
94 |
+
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
|
95 |
+
adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
|
96 |
+
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
|
97 |
+
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
|
98 |
+
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
|
99 |
+
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
|
100 |
+
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
|
101 |
+
seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
|
102 |
+
push_to_hub: bool = field(
|
103 |
+
default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
|
104 |
+
)
|
105 |
+
hub_model_id: str = field(
|
106 |
+
default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
|
107 |
+
)
|
108 |
+
hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})
|
109 |
+
|
110 |
+
def __post_init__(self):
|
111 |
+
if self.output_dir is not None:
|
112 |
+
self.output_dir = os.path.expanduser(self.output_dir)
|
113 |
+
|
114 |
+
def to_dict(self):
|
115 |
+
"""
|
116 |
+
Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates
|
117 |
+
the token values by removing their value.
|
118 |
+
"""
|
119 |
+
d = asdict(self)
|
120 |
+
for k, v in d.items():
|
121 |
+
if isinstance(v, Enum):
|
122 |
+
d[k] = v.value
|
123 |
+
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
|
124 |
+
d[k] = [x.value for x in v]
|
125 |
+
if k.endswith("_token"):
|
126 |
+
d[k] = f"<{k.upper()}>"
|
127 |
+
return d
|
128 |
+
|
129 |
+
|
130 |
+
@dataclass
|
131 |
+
class ModelArguments:
|
132 |
+
"""
|
133 |
+
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
|
134 |
+
"""
|
135 |
+
|
136 |
+
model_name_or_path: Optional[str] = field(
|
137 |
+
default=None,
|
138 |
+
metadata={
|
139 |
+
"help": "The model checkpoint for weights initialization."
|
140 |
+
"Don't set if you want to train a model from scratch."
|
141 |
+
},
|
142 |
+
)
|
143 |
+
model_type: Optional[str] = field(
|
144 |
+
default=None,
|
145 |
+
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
|
146 |
+
)
|
147 |
+
config_name: Optional[str] = field(
|
148 |
+
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
|
149 |
+
)
|
150 |
+
tokenizer_name: Optional[str] = field(
|
151 |
+
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
|
152 |
+
)
|
153 |
+
cache_dir: Optional[str] = field(
|
154 |
+
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
|
155 |
+
)
|
156 |
+
use_fast_tokenizer: bool = field(
|
157 |
+
default=True,
|
158 |
+
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
|
159 |
+
)
|
160 |
+
dtype: Optional[str] = field(
|
161 |
+
default="float32",
|
162 |
+
metadata={
|
163 |
+
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
|
164 |
+
},
|
165 |
+
)
|
166 |
+
|
167 |
+
|
168 |
+
@dataclass
|
169 |
+
class DataTrainingArguments:
|
170 |
+
"""
|
171 |
+
Arguments pertaining to what data we are going to input our model for training and eval.
|
172 |
+
"""
|
173 |
+
|
174 |
+
dataset_name: Optional[str] = field(
|
175 |
+
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
|
176 |
+
)
|
177 |
+
dataset_config_name: Optional[str] = field(
|
178 |
+
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
|
179 |
+
)
|
180 |
+
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
|
181 |
+
validation_file: Optional[str] = field(
|
182 |
+
default=None,
|
183 |
+
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
|
184 |
+
)
|
185 |
+
train_ref_file: Optional[str] = field(
|
186 |
+
default=None,
|
187 |
+
metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
|
188 |
+
)
|
189 |
+
validation_ref_file: Optional[str] = field(
|
190 |
+
default=None,
|
191 |
+
metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
|
192 |
+
)
|
193 |
+
overwrite_cache: bool = field(
|
194 |
+
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
|
195 |
+
)
|
196 |
+
validation_split_percentage: Optional[int] = field(
|
197 |
+
default=5,
|
198 |
+
metadata={
|
199 |
+
"help": "The percentage of the train set used as validation set in case there's no validation split"
|
200 |
+
},
|
201 |
+
)
|
202 |
+
max_seq_length: Optional[int] = field(
|
203 |
+
default=None,
|
204 |
+
metadata={
|
205 |
+
"help": "The maximum total input sequence length after tokenization. Sequences longer "
|
206 |
+
"than this will be truncated. Default to the max input length of the model."
|
207 |
+
},
|
208 |
+
)
|
209 |
+
preprocessing_num_workers: Optional[int] = field(
|
210 |
+
default=None,
|
211 |
+
metadata={"help": "The number of processes to use for the preprocessing."},
|
212 |
+
)
|
213 |
+
mlm_probability: float = field(
|
214 |
+
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
|
215 |
+
)
|
216 |
+
pad_to_max_length: bool = field(
|
217 |
+
default=False,
|
218 |
+
metadata={
|
219 |
+
"help": "Whether to pad all samples to `max_seq_length`. "
|
220 |
+
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
|
221 |
+
},
|
222 |
+
)
|
223 |
+
line_by_line: bool = field(
|
224 |
+
default=False,
|
225 |
+
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
|
226 |
+
)
|
227 |
+
|
228 |
+
def __post_init__(self):
|
229 |
+
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
|
230 |
+
raise ValueError("Need either a dataset name or a training/validation file.")
|
231 |
+
else:
|
232 |
+
if self.train_file is not None:
|
233 |
+
extension = self.train_file.split(".")[-1]
|
234 |
+
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
|
235 |
+
if self.validation_file is not None:
|
236 |
+
extension = self.validation_file.split(".")[-1]
|
237 |
+
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
|
238 |
+
|
239 |
+
|
240 |
+
@flax.struct.dataclass
|
241 |
+
class FlaxDataCollatorForLanguageModeling:
|
242 |
+
"""
|
243 |
+
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
|
244 |
+
are not all of the same length.
|
245 |
+
|
246 |
+
Args:
|
247 |
+
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
|
248 |
+
The tokenizer used for encoding the data.
|
249 |
+
mlm_probability (:obj:`float`, `optional`, defaults to 0.15):
|
250 |
+
The probability with which to (randomly) mask tokens in the input.
|
251 |
+
|
252 |
+
.. note::
|
253 |
+
|
254 |
+
For best performance, this data collator should be used with a dataset having items that are dictionaries or
|
255 |
+
BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a
|
256 |
+
:class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the
|
257 |
+
argument :obj:`return_special_tokens_mask=True`.
|
258 |
+
"""
|
259 |
+
|
260 |
+
tokenizer: PreTrainedTokenizerBase
|
261 |
+
mlm_probability: float = 0.15
|
262 |
+
|
263 |
+
def __post_init__(self):
|
264 |
+
if self.tokenizer.mask_token is None:
|
265 |
+
raise ValueError(
|
266 |
+
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
|
267 |
+
"You should pass `mlm=False` to train on causal language modeling instead."
|
268 |
+
)
|
269 |
+
|
270 |
+
def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]:
|
271 |
+
# Handle dict or lists with proper padding and conversion to tensor.
|
272 |
+
batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY)
|
273 |
+
|
274 |
+
# If special token mask has been preprocessed, pop it from the dict.
|
275 |
+
special_tokens_mask = batch.pop("special_tokens_mask", None)
|
276 |
+
|
277 |
+
batch["input_ids"], batch["labels"] = self.mask_tokens(
|
278 |
+
batch["input_ids"], special_tokens_mask=special_tokens_mask
|
279 |
+
)
|
280 |
+
return batch
|
281 |
+
|
282 |
+
def mask_tokens(
|
283 |
+
self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray]
|
284 |
+
) -> Tuple[np.ndarray, np.ndarray]:
|
285 |
+
"""
|
286 |
+
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
|
287 |
+
"""
|
288 |
+
labels = inputs.copy()
|
289 |
+
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
|
290 |
+
probability_matrix = np.full(labels.shape, self.mlm_probability)
|
291 |
+
special_tokens_mask = special_tokens_mask.astype("bool")
|
292 |
+
|
293 |
+
probability_matrix[special_tokens_mask] = 0.0
|
294 |
+
masked_indices = np.random.binomial(1, probability_matrix).astype("bool")
|
295 |
+
labels[~masked_indices] = -100 # We only compute loss on masked tokens
|
296 |
+
|
297 |
+
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
|
298 |
+
indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices
|
299 |
+
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
|
300 |
+
|
301 |
+
# 10% of the time, we replace masked input tokens with random word
|
302 |
+
indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool")
|
303 |
+
indices_random &= masked_indices & ~indices_replaced
|
304 |
+
|
305 |
+
random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4")
|
306 |
+
inputs[indices_random] = random_words[indices_random]
|
307 |
+
|
308 |
+
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
|
309 |
+
return inputs, labels
|
310 |
+
|
311 |
+
|
312 |
+
def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:
|
313 |
+
num_samples = len(samples_idx)
|
314 |
+
samples_to_remove = num_samples % batch_size
|
315 |
+
|
316 |
+
if samples_to_remove != 0:
|
317 |
+
samples_idx = samples_idx[:-samples_to_remove]
|
318 |
+
sections_split = num_samples // batch_size
|
319 |
+
batch_idx = np.split(samples_idx, sections_split)
|
320 |
+
return batch_idx
|
321 |
+
|
322 |
+
|
323 |
+
def write_train_metric(summary_writer, train_metrics, train_time, step):
|
324 |
+
summary_writer.scalar("train_time", train_time, step)
|
325 |
+
|
326 |
+
train_metrics = get_metrics(train_metrics)
|
327 |
+
for key, vals in train_metrics.items():
|
328 |
+
tag = f"train_{key}"
|
329 |
+
for i, val in enumerate(vals):
|
330 |
+
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
|
331 |
+
|
332 |
+
|
333 |
+
def write_eval_metric(summary_writer, eval_metrics, step):
|
334 |
+
for metric_name, value in eval_metrics.items():
|
335 |
+
summary_writer.scalar(f"eval_{metric_name}", value, step)
|
336 |
+
|
337 |
+
|
338 |
+
def main():
|
339 |
+
# See all possible arguments in src/transformers/training_args.py
|
340 |
+
# or by passing the --help flag to this script.
|
341 |
+
# We now keep distinct sets of args, for a cleaner separation of concerns.
|
342 |
+
|
343 |
+
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
|
344 |
+
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
|
345 |
+
# If we pass only one argument to the script and it's the path to a json file,
|
346 |
+
# let's parse it to get our arguments.
|
347 |
+
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
|
348 |
+
else:
|
349 |
+
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
350 |
+
|
351 |
+
if (
|
352 |
+
os.path.exists(training_args.output_dir)
|
353 |
+
and os.listdir(training_args.output_dir)
|
354 |
+
and training_args.do_train
|
355 |
+
and not training_args.overwrite_output_dir
|
356 |
+
):
|
357 |
+
raise ValueError(
|
358 |
+
f"Output directory ({training_args.output_dir}) already exists and is not empty."
|
359 |
+
"Use --overwrite_output_dir to overcome."
|
360 |
+
)
|
361 |
+
|
362 |
+
# Setup logging
|
363 |
+
logging.basicConfig(
|
364 |
+
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
365 |
+
level=logging.INFO,
|
366 |
+
datefmt="[%X]",
|
367 |
+
)
|
368 |
+
|
369 |
+
# Log on each process the small summary:
|
370 |
+
logger = logging.getLogger(__name__)
|
371 |
+
|
372 |
+
# Set the verbosity to info of the Transformers logger (on main process only):
|
373 |
+
logger.info(f"Training/evaluation parameters {training_args}")
|
374 |
+
|
375 |
+
# Set seed before initializing model.
|
376 |
+
set_seed(training_args.seed)
|
377 |
+
|
378 |
+
# Handle the repository creation
|
379 |
+
if training_args.push_to_hub:
|
380 |
+
if training_args.hub_model_id is None:
|
381 |
+
repo_name = get_full_repo_name(
|
382 |
+
Path(training_args.output_dir).absolute().name, token=training_args.hub_token
|
383 |
+
)
|
384 |
+
else:
|
385 |
+
repo_name = training_args.hub_model_id
|
386 |
+
repo = Repository(training_args.output_dir, clone_from=repo_name)
|
387 |
+
|
388 |
+
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
|
389 |
+
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
|
390 |
+
# (the dataset will be downloaded automatically from the datasets Hub).
|
391 |
+
#
|
392 |
+
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
|
393 |
+
# 'text' is found. You can easily tweak this behavior (see below).
|
394 |
+
#
|
395 |
+
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
|
396 |
+
# download the dataset.
|
397 |
+
if data_args.dataset_name is not None:
|
398 |
+
# Downloading and loading a dataset from the hub.
|
399 |
+
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
|
400 |
+
|
401 |
+
if "validation" not in datasets.keys():
|
402 |
+
datasets["validation"] = load_dataset(
|
403 |
+
data_args.dataset_name,
|
404 |
+
data_args.dataset_config_name,
|
405 |
+
split=f"train[:{data_args.validation_split_percentage}%]",
|
406 |
+
cache_dir=model_args.cache_dir,
|
407 |
+
)
|
408 |
+
datasets["train"] = load_dataset(
|
409 |
+
data_args.dataset_name,
|
410 |
+
data_args.dataset_config_name,
|
411 |
+
split=f"train[{data_args.validation_split_percentage}%:]",
|
412 |
+
cache_dir=model_args.cache_dir,
|
413 |
+
)
|
414 |
+
else:
|
415 |
+
data_files = {}
|
416 |
+
if data_args.train_file is not None:
|
417 |
+
data_files["train"] = data_args.train_file
|
418 |
+
if data_args.validation_file is not None:
|
419 |
+
data_files["validation"] = data_args.validation_file
|
420 |
+
extension = data_args.train_file.split(".")[-1]
|
421 |
+
if extension == "txt":
|
422 |
+
extension = "text"
|
423 |
+
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
|
424 |
+
|
425 |
+
if "validation" not in datasets.keys():
|
426 |
+
datasets["validation"] = load_dataset(
|
427 |
+
extension,
|
428 |
+
data_files=data_files,
|
429 |
+
split=f"train[:{data_args.validation_split_percentage}%]",
|
430 |
+
cache_dir=model_args.cache_dir,
|
431 |
+
)
|
432 |
+
datasets["train"] = load_dataset(
|
433 |
+
extension,
|
434 |
+
data_files=data_files,
|
435 |
+
split=f"train[{data_args.validation_split_percentage}%:]",
|
436 |
+
cache_dir=model_args.cache_dir,
|
437 |
+
)
|
438 |
+
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
|
439 |
+
# https://huggingface.co/docs/datasets/loading_datasets.html.
|
440 |
+
|
441 |
+
# Load pretrained model and tokenizer
|
442 |
+
|
443 |
+
# Distributed training:
|
444 |
+
# The .from_pretrained methods guarantee that only one local process can concurrently
|
445 |
+
# download model & vocab.
|
446 |
+
if model_args.config_name:
|
447 |
+
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
|
448 |
+
elif model_args.model_name_or_path:
|
449 |
+
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
|
450 |
+
else:
|
451 |
+
config = CONFIG_MAPPING[model_args.model_type]()
|
452 |
+
logger.warning("You are instantiating a new config instance from scratch.")
|
453 |
+
|
454 |
+
if model_args.tokenizer_name:
|
455 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
456 |
+
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
|
457 |
+
)
|
458 |
+
elif model_args.model_name_or_path:
|
459 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
460 |
+
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
|
461 |
+
)
|
462 |
+
else:
|
463 |
+
raise ValueError(
|
464 |
+
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
|
465 |
+
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
|
466 |
+
)
|
467 |
+
|
468 |
+
# Preprocessing the datasets.
|
469 |
+
# First we tokenize all the texts.
|
470 |
+
if training_args.do_train:
|
471 |
+
column_names = datasets["train"].column_names
|
472 |
+
else:
|
473 |
+
column_names = datasets["validation"].column_names
|
474 |
+
text_column_name = "text" if "text" in column_names else column_names[0]
|
475 |
+
|
476 |
+
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
|
477 |
+
|
478 |
+
if data_args.line_by_line:
|
479 |
+
# When using line_by_line, we just tokenize each nonempty line.
|
480 |
+
padding = "max_length" if data_args.pad_to_max_length else False
|
481 |
+
|
482 |
+
def tokenize_function(examples):
|
483 |
+
# Remove empty lines
|
484 |
+
examples = [line for line in examples if len(line) > 0 and not line.isspace()]
|
485 |
+
return tokenizer(
|
486 |
+
examples,
|
487 |
+
return_special_tokens_mask=True,
|
488 |
+
padding=padding,
|
489 |
+
truncation=True,
|
490 |
+
max_length=max_seq_length,
|
491 |
+
)
|
492 |
+
|
493 |
+
tokenized_datasets = datasets.map(
|
494 |
+
tokenize_function,
|
495 |
+
input_columns=[text_column_name],
|
496 |
+
batched=True,
|
497 |
+
num_proc=data_args.preprocessing_num_workers,
|
498 |
+
remove_columns=column_names,
|
499 |
+
load_from_cache_file=not data_args.overwrite_cache,
|
500 |
+
)
|
501 |
+
|
502 |
+
else:
|
503 |
+
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
|
504 |
+
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
|
505 |
+
# efficient when it receives the `special_tokens_mask`.
|
506 |
+
def tokenize_function(examples):
|
507 |
+
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
|
508 |
+
|
509 |
+
tokenized_datasets = datasets.map(
|
510 |
+
tokenize_function,
|
511 |
+
batched=True,
|
512 |
+
num_proc=data_args.preprocessing_num_workers,
|
513 |
+
remove_columns=column_names,
|
514 |
+
load_from_cache_file=not data_args.overwrite_cache,
|
515 |
+
)
|
516 |
+
|
517 |
+
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
|
518 |
+
# max_seq_length.
|
519 |
+
def group_texts(examples):
|
520 |
+
# Concatenate all texts.
|
521 |
+
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
|
522 |
+
total_length = len(concatenated_examples[list(examples.keys())[0]])
|
523 |
+
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
|
524 |
+
# customize this part to your needs.
|
525 |
+
if total_length >= max_seq_length:
|
526 |
+
total_length = (total_length // max_seq_length) * max_seq_length
|
527 |
+
# Split by chunks of max_len.
|
528 |
+
result = {
|
529 |
+
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
|
530 |
+
for k, t in concatenated_examples.items()
|
531 |
+
}
|
532 |
+
return result
|
533 |
+
|
534 |
+
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
|
535 |
+
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
|
536 |
+
# might be slower to preprocess.
|
537 |
+
#
|
538 |
+
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
|
539 |
+
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
|
540 |
+
tokenized_datasets = tokenized_datasets.map(
|
541 |
+
group_texts,
|
542 |
+
batched=True,
|
543 |
+
num_proc=data_args.preprocessing_num_workers,
|
544 |
+
load_from_cache_file=not data_args.overwrite_cache,
|
545 |
+
)
|
546 |
+
|
547 |
+
# Enable tensorboard only on the master node
|
548 |
+
has_tensorboard = is_tensorboard_available()
|
549 |
+
if has_tensorboard and jax.process_index() == 0:
|
550 |
+
try:
|
551 |
+
# Enable Weight&Biases
|
552 |
+
import wandb
|
553 |
+
wandb.init(
|
554 |
+
entity='versae',
|
555 |
+
project='roberta-base-ncc',
|
556 |
+
sync_tensorboard=True,
|
557 |
+
)
|
558 |
+
wandb.config.update(training_args)
|
559 |
+
wandb.config.update(model_args)
|
560 |
+
wandb.config.update(data_args)
|
561 |
+
|
562 |
+
from flax.metrics.tensorboard import SummaryWriter
|
563 |
+
|
564 |
+
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
|
565 |
+
except ImportError as ie:
|
566 |
+
has_tensorboard = False
|
567 |
+
logger.warning(
|
568 |
+
f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
|
569 |
+
)
|
570 |
+
else:
|
571 |
+
logger.warning(
|
572 |
+
"Unable to display metrics through TensorBoard because the package is not installed: "
|
573 |
+
"Please run pip install tensorboard to enable."
|
574 |
+
)
|
575 |
+
|
576 |
+
# Data collator
|
577 |
+
# This one will take care of randomly masking the tokens.
|
578 |
+
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
|
579 |
+
|
580 |
+
# Initialize our training
|
581 |
+
rng = jax.random.PRNGKey(training_args.seed)
|
582 |
+
dropout_rngs = jax.random.split(rng, jax.local_device_count())
|
583 |
+
|
584 |
+
if model_args.model_name_or_path:
|
585 |
+
model = FlaxAutoModelForMaskedLM.from_pretrained(
|
586 |
+
model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
|
587 |
+
)
|
588 |
+
else:
|
589 |
+
model = FlaxAutoModelForMaskedLM.from_config(
|
590 |
+
config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
|
591 |
+
)
|
592 |
+
|
593 |
+
# Store some constant
|
594 |
+
num_epochs = int(training_args.num_train_epochs)
|
595 |
+
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
|
596 |
+
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
|
597 |
+
|
598 |
+
num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
|
599 |
+
|
600 |
+
# Create learning rate schedule
|
601 |
+
warmup_fn = optax.linear_schedule(
|
602 |
+
init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
|
603 |
+
)
|
604 |
+
decay_fn = optax.linear_schedule(
|
605 |
+
init_value=training_args.learning_rate,
|
606 |
+
end_value=0,
|
607 |
+
transition_steps=num_train_steps - training_args.warmup_steps,
|
608 |
+
)
|
609 |
+
linear_decay_lr_schedule_fn = optax.join_schedules(
|
610 |
+
schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
|
611 |
+
)
|
612 |
+
|
613 |
+
# We use Optax's "masking" functionality to not apply weight decay
|
614 |
+
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
615 |
+
# mask boolean with the same structure as the parameters.
|
616 |
+
# The mask is True for parameters that should be decayed.
|
617 |
+
# Note that this mask is specifically adapted for FlaxBERT-like models.
|
618 |
+
# For other models, one should correct the layer norm parameter naming
|
619 |
+
# accordingly.
|
620 |
+
def decay_mask_fn(params):
|
621 |
+
flat_params = traverse_util.flatten_dict(params)
|
622 |
+
flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
|
623 |
+
return traverse_util.unflatten_dict(flat_mask)
|
624 |
+
|
625 |
+
# create adam optimizer
|
626 |
+
if training_args.adafactor:
|
627 |
+
# We use the default parameters here to initialize adafactor,
|
628 |
+
# For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
|
629 |
+
optimizer = optax.adafactor(
|
630 |
+
learning_rate=linear_decay_lr_schedule_fn,
|
631 |
+
)
|
632 |
+
else:
|
633 |
+
optimizer = optax.adamw(
|
634 |
+
learning_rate=linear_decay_lr_schedule_fn,
|
635 |
+
b1=training_args.adam_beta1,
|
636 |
+
b2=training_args.adam_beta2,
|
637 |
+
eps=training_args.adam_epsilon,
|
638 |
+
weight_decay=training_args.weight_decay,
|
639 |
+
mask=decay_mask_fn,
|
640 |
+
)
|
641 |
+
|
642 |
+
# Setup train state
|
643 |
+
state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer)
|
644 |
+
|
645 |
+
# Define gradient update step fn
|
646 |
+
def train_step(state, batch, dropout_rng):
|
647 |
+
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
|
648 |
+
|
649 |
+
def loss_fn(params):
|
650 |
+
labels = batch.pop("labels")
|
651 |
+
|
652 |
+
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
|
653 |
+
|
654 |
+
# compute loss, ignore padded input tokens
|
655 |
+
label_mask = jnp.where(labels > 0, 1.0, 0.0)
|
656 |
+
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
|
657 |
+
|
658 |
+
# take average
|
659 |
+
loss = loss.sum() / label_mask.sum()
|
660 |
+
|
661 |
+
return loss
|
662 |
+
|
663 |
+
grad_fn = jax.value_and_grad(loss_fn)
|
664 |
+
        loss, grad = grad_fn(state.params)
        grad = jax.lax.pmean(grad, "batch")
        new_state = state.apply_gradients(grads=grad)

        metrics = jax.lax.pmean(
            {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
        )

        return new_state, metrics, new_dropout_rng

    # Create parallel version of the train step
    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))

    # Define eval fn
    def eval_step(params, batch):
        labels = batch.pop("labels")

        logits = model(**batch, params=params, train=False)[0]

        # compute loss, ignore padded input tokens
        label_mask = jnp.where(labels > 0, 1.0, 0.0)
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask

        # compute accuracy
        accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask

        # summarize metrics
        metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()}
        metrics = jax.lax.psum(metrics, axis_name="batch")

        return metrics

    p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))

    # Replicate the train state on each device
    state = jax_utils.replicate(state)

    train_time = 0
    epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
    for epoch in epochs:
        # ======================== Training ================================
        train_start = time.time()
        train_metrics = []

        # Create sampling rng
        rng, input_rng = jax.random.split(rng)

        # Generate an epoch by shuffling sampling indices from the train dataset
        num_train_samples = len(tokenized_datasets["train"])
        train_samples_idx = jax.random.permutation(input_rng, jnp.arange(num_train_samples))
        train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)

        # Gather the indexes for creating the batch and do a training step
        for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)):
            samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
            model_inputs = data_collator(samples, pad_to_multiple_of=16)

            # Model forward
            model_inputs = shard(model_inputs.data)
            state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
            train_metrics.append(train_metric)

            cur_step = epoch * (num_train_samples // train_batch_size) + step

            if cur_step % training_args.logging_steps == 0 and cur_step > 0:
                # Save metrics
                train_metric = jax_utils.unreplicate(train_metric)
                train_time += time.time() - train_start
                if has_tensorboard and jax.process_index() == 0:
                    write_train_metric(summary_writer, train_metrics, train_time, cur_step)

                epochs.write(
                    f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})"
                )

                train_metrics = []

            if cur_step % training_args.eval_steps == 0 and cur_step > 0:
                # ======================== Evaluating ==============================
                num_eval_samples = len(tokenized_datasets["validation"])
                eval_samples_idx = jnp.arange(num_eval_samples)
                eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)

                eval_metrics = []
                for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
                    samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
                    model_inputs = data_collator(samples, pad_to_multiple_of=16)

                    # Model forward
                    model_inputs = shard(model_inputs.data)
                    metrics = p_eval_step(state.params, model_inputs)
                    eval_metrics.append(metrics)

                # normalize eval metrics
                eval_metrics = get_metrics(eval_metrics)
                eval_metrics = jax.tree_map(jnp.sum, eval_metrics)
                eval_normalizer = eval_metrics.pop("normalizer")
                eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)

                # Update progress bar
                epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})"

                # Save metrics
                if has_tensorboard and jax.process_index() == 0:
                    write_eval_metric(summary_writer, eval_metrics, cur_step)

            if cur_step % training_args.save_steps == 0 and cur_step > 0:
                # save checkpoint after each epoch and push checkpoint to the hub
                if jax.process_index() == 0:
                    params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
                    model.save_pretrained(training_args.output_dir, params=params)
                    tokenizer.save_pretrained(training_args.output_dir)
                    if training_args.push_to_hub:
                        repo.push_to_hub(commit_message=f"Saving weights and logs of step {cur_step}", blocking=False)

    # Eval after training
    if training_args.do_eval:
        num_eval_samples = len(tokenized_datasets["validation"])
        eval_samples_idx = jnp.arange(num_eval_samples)
        eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)

        eval_metrics = []
        for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
            samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
            model_inputs = data_collator(samples, pad_to_multiple_of=16)

            # Model forward
            model_inputs = shard(model_inputs.data)
            metrics = p_eval_step(state.params, model_inputs)
            eval_metrics.append(metrics)

        # normalize eval metrics
        eval_metrics = get_metrics(eval_metrics)
        eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics)
        eval_normalizer = eval_metrics.pop("normalizer")
        eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)

        try:
            perplexity = math.exp(eval_metrics["loss"])
        except OverflowError:
            perplexity = float("inf")
        eval_metrics["perplexity"] = perplexity

        if jax.process_index() == 0:
            eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()}
            path = os.path.join(training_args.output_dir, "eval_results.json")
            with open(path, "w") as f:
                json.dump(eval_metrics, f, indent=4, sort_keys=True)


if __name__ == "__main__":
    main()
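
Note: the train and eval steps above are wrapped with jax.pmap over a named "batch" axis and reduce their per-device metrics with jax.lax.pmean / psum. A minimal, self-contained sketch of that data-parallel pattern follows; it is not part of this commit, and the toy step function and array shapes are illustrative only.

# Minimal sketch of the pmap + pmean pattern used by train_step/eval_step above.
# The toy `step` function and shapes are illustrative, not taken from the script.
import jax
import jax.numpy as jnp


def step(x):
    # Per-device scalar (e.g. a loss), averaged across the named "batch" axis.
    return jax.lax.pmean(x.mean(), axis_name="batch")


p_step = jax.pmap(step, axis_name="batch")

n_dev = jax.local_device_count()
# pmapped functions expect a leading axis of size n_dev (cf. shard(model_inputs.data)).
xs = jnp.arange(n_dev * 4, dtype=jnp.float32).reshape(n_dev, 4)
print(p_step(xs))  # one identical cross-device mean per device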
wandb/run-20220120_151428-zjqubvsf/files/config.yaml
ADDED
@@ -0,0 +1,147 @@
wandb_version: 1

_wandb:
  desc: null
  value:
    cli_version: 0.12.9
    code_path: code/run_mlm_flax.py
    framework: huggingface
    huggingface_version: 4.16.0.dev0
    is_jupyter_run: false
    is_kaggle_kernel: false
    python_version: 3.8.10
    start_time: 1642691668
    t:
      1:
      - 2
      - 3
      - 11
      - 12
      4: 3.8.10
      5: 0.12.9
      6: 4.16.0.dev0
      8:
      - 5
adafactor:
  desc: null
  value: false
adam_beta1:
  desc: null
  value: 0.9
adam_beta2:
  desc: null
  value: 0.98
adam_epsilon:
  desc: null
  value: 1.0e-06
cache_dir:
  desc: null
  value: null
config_name:
  desc: null
  value: ./
dataset_config_name:
  desc: null
  value: null
dataset_name:
  desc: null
  value: NbAiLab/NCC
do_eval:
  desc: null
  value: true
do_train:
  desc: null
  value: true
dtype:
  desc: null
  value: bfloat16
eval_steps:
  desc: null
  value: 1000
hub_model_id:
  desc: null
  value: null
hub_token:
  desc: null
  value: null
learning_rate:
  desc: null
  value: 0.00015
line_by_line:
  desc: null
  value: false
logging_steps:
  desc: null
  value: 1000
max_seq_length:
  desc: null
  value: 512
mlm_probability:
  desc: null
  value: 0.15
model_name_or_path:
  desc: null
  value: versae/roberta-base-ncc
model_type:
  desc: null
  value: roberta
num_train_epochs:
  desc: null
  value: 3.0
output_dir:
  desc: null
  value: ./
overwrite_cache:
  desc: null
  value: false
overwrite_output_dir:
  desc: null
  value: true
pad_to_max_length:
  desc: null
  value: true
per_device_eval_batch_size:
  desc: null
  value: 46
per_device_train_batch_size:
  desc: null
  value: 46
preprocessing_num_workers:
  desc: null
  value: null
push_to_hub:
  desc: null
  value: true
save_steps:
  desc: null
  value: 1000
seed:
  desc: null
  value: 42
tokenizer_name:
  desc: null
  value: ./
train_file:
  desc: null
  value: null
train_ref_file:
  desc: null
  value: null
use_fast_tokenizer:
  desc: null
  value: true
validation_file:
  desc: null
  value: null
validation_ref_file:
  desc: null
  value: null
validation_split_percentage:
  desc: null
  value: 5
warmup_steps:
  desc: null
  value: 1000
weight_decay:
  desc: null
  value: 0.01
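
For reference, a hedged sketch of how the learning_rate and warmup_steps values in this config translate into the linear warmup-then-decay schedule that run_mlm_flax.py builds with optax; total_steps below is an assumed placeholder (steps per epoch times num_train_epochs), not a value read from this run.

# Sketch only: linear warmup to `learning_rate`, then linear decay to 0.
# `total_steps` is an assumed placeholder, not taken from the logged config.
import optax

learning_rate = 0.00015
warmup_steps = 1000
total_steps = 54217 * 3  # assumed: steps per epoch * num_train_epochs

warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(
    init_value=learning_rate, end_value=0.0, transition_steps=total_steps - warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

print(schedule_fn(0), schedule_fn(warmup_steps), schedule_fn(total_steps))  # 0.0, 1.5e-4, ~0.0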
wandb/run-20220120_151428-zjqubvsf/files/diff.patch
ADDED
The diff for this file is too large to render.
See raw diff
wandb/run-20220120_151428-zjqubvsf/files/events.out.tfevents.1642691672.t1v-n-00e295a4-w-0.2612942.0.v2
ADDED
@@ -0,0 +1 @@
/data/roberta-base-ncc-512b/events.out.tfevents.1642691672.t1v-n-00e295a4-w-0.2612942.0.v2
wandb/run-20220120_151428-zjqubvsf/files/output.log
ADDED
@@ -0,0 +1,872 @@
2022-01-20 15:14:32.047305: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2022-01-20 15:14:32.047354: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)
Epoch ... (1/3): 0%| | 0/3 [00:00<?, ?it/s]
Training...: 2%|████▏ | 1000/54217 [18:35<14:55:43, 1.01s/it]
Step... (1000 | Loss: 1.8212230205535889, Learning Rate: 0.0001500000071246177)
Evaluating ...: 0%| | 0/1167 [00:00<?, ?it/s]
wandb/run-20220120_151428-zjqubvsf/files/requirements.txt
ADDED
@@ -0,0 +1,122 @@
absl-py==1.0.0
aiohttp==3.8.1
aiosignal==1.2.0
asttokens==2.0.5
astunparse==1.6.3
async-timeout==4.0.2
attrs==21.4.0
backcall==0.2.0
black==21.12b0
cachetools==4.2.4
certifi==2021.10.8
charset-normalizer==2.0.10
chex==0.1.0
click==8.0.3
configparser==5.2.0
cycler==0.11.0
datasets==1.17.1.dev0
decorator==5.1.1
dill==0.3.4
dm-tree==0.1.6
docker-pycreds==0.4.0
executing==0.8.2
filelock==3.4.2
flatbuffers==2.0
flax==0.3.6
fonttools==4.28.5
frozenlist==1.2.0
fsspec==2022.1.0
gast==0.4.0
gitdb==4.0.9
gitpython==3.1.26
google-auth-oauthlib==0.4.6
google-auth==2.3.3
google-pasta==0.2.0
grpcio==1.43.0
h5py==3.6.0
huggingface-hub==0.4.0
idna==3.3
importlib-metadata==4.10.1
ipython==8.0.0
jax==0.2.26
jaxlib==0.1.75
jedi==0.18.1
joblib==1.1.0
keras-preprocessing==1.1.2
keras==2.7.0
kiwisolver==1.3.2
libclang==12.0.0
libtpu-nightly==0.1.dev20211208
markdown==3.3.6
matplotlib-inline==0.1.3
matplotlib==3.5.1
msgpack==1.0.3
multidict==5.2.0
multiprocess==0.70.12.2
mypy-extensions==0.4.3
numpy==1.22.1
oauthlib==3.1.1
opt-einsum==3.3.0
optax==0.1.0
packaging==21.3
pandas==1.3.5
parso==0.8.3
pathspec==0.9.0
pathtools==0.1.2
pexpect==4.8.0
pickleshare==0.7.5
pillow==9.0.0
pip==20.0.2
pkg-resources==0.0.0
platformdirs==2.4.1
promise==2.3
prompt-toolkit==3.0.24
protobuf==3.19.3
psutil==5.9.0
ptyprocess==0.7.0
pure-eval==0.2.1
pyarrow==6.0.1
pyasn1-modules==0.2.8
pyasn1==0.4.8
pygments==2.11.2
pyparsing==3.0.6
python-dateutil==2.8.2
pytz==2021.3
pyyaml==6.0
regex==2021.11.10
requests-oauthlib==1.3.0
requests==2.27.1
rsa==4.8
sacremoses==0.0.47
scipy==1.7.3
sentry-sdk==1.5.2
setuptools==44.0.0
shortuuid==1.0.8
six==1.16.0
smmap==5.0.0
stack-data==0.1.4
subprocess32==3.5.4
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorboard==2.7.0
tensorflow-estimator==2.7.0
tensorflow-io-gcs-filesystem==0.23.1
tensorflow==2.7.0
termcolor==1.1.0
tokenizers==0.11.4
tomli==1.2.3
toolz==0.11.2
tqdm==4.62.3
traitlets==5.1.1
transformers==4.16.0.dev0
typing-extensions==4.0.1
urllib3==1.26.8
wandb==0.12.9
wcwidth==0.2.5
werkzeug==2.0.2
wheel==0.37.1
wrapt==1.13.3
xxhash==2.0.2
yarl==1.7.2
yaspin==2.1.0
zipp==3.7.0
wandb/run-20220120_151428-zjqubvsf/files/wandb-metadata.json
ADDED
@@ -0,0 +1,48 @@
{
  "os": "Linux-5.4.0-1043-gcp-x86_64-with-glibc2.29",
  "python": "3.8.10",
  "heartbeatAt": "2022-01-20T15:14:31.883821",
  "startedAt": "2022-01-20T15:14:28.478454",
  "docker": null,
  "cpu_count": 96,
  "cuda": null,
  "args": [
    "--output_dir=./",
    "--model_type=roberta",
    "--model_name_or_path=versae/roberta-base-ncc",
    "--config_name=./",
    "--tokenizer_name=./",
    "--dataset_name=NbAiLab/NCC",
    "--max_seq_length=512",
    "--weight_decay=0.01",
    "--per_device_train_batch_size=46",
    "--per_device_eval_batch_size=46",
    "--pad_to_max_length",
    "--learning_rate=0.00015",
    "--warmup_steps=1000",
    "--overwrite_output_dir",
    "--num_train_epochs=3",
    "--adam_beta1=0.9",
    "--adam_beta2=0.98",
    "--adam_epsilon=1e-6",
    "--logging_steps=1000",
    "--save_steps=1000",
    "--eval_steps=1000",
    "--do_train",
    "--do_eval",
    "--dtype=bfloat16",
    "--push_to_hub"
  ],
  "state": "running",
  "program": "run_mlm_flax.py",
  "codePath": "run_mlm_flax.py",
  "git": {
    "remote": "https://huggingface.co/versae/roberta-base-ncc-512b",
    "commit": "bda7d6806db005f6c1862f0c8134554ae55df859"
  },
  "email": "versae@gmail.com",
  "root": "/data/roberta-base-ncc-512b",
  "host": "t1v-n-00e295a4-w-0",
  "username": "javierr",
  "executable": "/data/flax/bin/python"
}
wandb/run-20220120_151428-zjqubvsf/files/wandb-summary.json
ADDED
@@ -0,0 +1 @@
{"global_step": 1000, "_timestamp": 1642692851.05338, "train_time": 1170.1522216796875, "train_learning_rate": 0.0001500000071246177, "_runtime": 1183, "_step": 1997, "train_loss": 1.8067307472229004}
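
The summary above logs train_loss at step 1000. As a rough reading aid only (the run itself does not compute a training perplexity), the perplexity implied by that cross-entropy loss is its exponential:

# Rough reading aid, not produced by the run: perplexity implied by the logged loss.
import math

train_loss = 1.8067307472229004  # from wandb-summary.json above
print(math.exp(train_loss))  # ≈ 6.09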
wandb/run-20220120_151428-zjqubvsf/logs/debug-internal.log
ADDED
The diff for this file is too large to render.
See raw diff
wandb/run-20220120_151428-zjqubvsf/logs/debug.log
ADDED
@@ -0,0 +1,26 @@
2022-01-20 15:14:28,479 INFO MainThread:2612942 [wandb_setup.py:_flush():71] setting env: {}
2022-01-20 15:14:28,479 INFO MainThread:2612942 [wandb_setup.py:_flush():71] setting login settings: {}
2022-01-20 15:14:28,480 INFO MainThread:2612942 [wandb_init.py:_log_setup():371] Logging user logs to /data/roberta-base-ncc-512b/wandb/run-20220120_151428-zjqubvsf/logs/debug.log
2022-01-20 15:14:28,480 INFO MainThread:2612942 [wandb_init.py:_log_setup():372] Logging internal logs to /data/roberta-base-ncc-512b/wandb/run-20220120_151428-zjqubvsf/logs/debug-internal.log
2022-01-20 15:14:28,480 INFO MainThread:2612942 [wandb_init.py:init():404] calling init triggers
2022-01-20 15:14:28,480 INFO MainThread:2612942 [wandb_init.py:init():409] wandb.init called with sweep_config: {}
config: {}
2022-01-20 15:14:28,480 INFO MainThread:2612942 [wandb_init.py:init():460] starting backend
2022-01-20 15:14:28,480 INFO MainThread:2612942 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
2022-01-20 15:14:28,520 INFO MainThread:2612942 [backend.py:ensure_launched():216] starting backend process...
2022-01-20 15:14:28,558 INFO MainThread:2612942 [backend.py:ensure_launched():221] started backend process with pid: 2614333
2022-01-20 15:14:28,560 INFO MainThread:2612942 [wandb_init.py:init():469] backend started and connected
2022-01-20 15:14:28,572 INFO MainThread:2612942 [wandb_init.py:init():533] updated telemetry
2022-01-20 15:14:28,664 INFO MainThread:2612942 [wandb_init.py:init():563] communicating current version
2022-01-20 15:14:29,397 INFO MainThread:2612942 [wandb_init.py:init():568] got version response
2022-01-20 15:14:29,397 INFO MainThread:2612942 [wandb_init.py:init():578] communicating run to backend with 30 second timeout
2022-01-20 15:14:29,595 INFO MainThread:2612942 [wandb_init.py:init():606] starting run threads in backend
2022-01-20 15:14:32,035 INFO MainThread:2612942 [wandb_run.py:_console_start():1810] atexit reg
2022-01-20 15:14:32,035 INFO MainThread:2612942 [wandb_run.py:_redirect():1684] redirect: SettingsConsole.REDIRECT
2022-01-20 15:14:32,036 INFO MainThread:2612942 [wandb_run.py:_redirect():1689] Redirecting console.
2022-01-20 15:14:32,038 INFO MainThread:2612942 [wandb_run.py:_redirect():1745] Redirects installed.
2022-01-20 15:14:32,038 INFO MainThread:2612942 [wandb_init.py:init():633] run started, returning control to user process
2022-01-20 15:14:32,038 INFO MainThread:2612942 [wandb_run.py:_config_callback():956] config_cb None None {'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'per_device_train_batch_size': 46, 'per_device_eval_batch_size': 46, 'learning_rate': 0.00015, 'weight_decay': 0.01, 'adam_beta1': 0.9, 'adam_beta2': 0.98, 'adam_epsilon': 1e-06, 'adafactor': False, 'num_train_epochs': 3.0, 'warmup_steps': 1000, 'logging_steps': 1000, 'save_steps': 1000, 'eval_steps': 1000, 'seed': 42, 'push_to_hub': True, 'hub_model_id': None, 'hub_token': None}
2022-01-20 15:14:32,039 INFO MainThread:2612942 [wandb_run.py:_config_callback():956] config_cb None None {'model_name_or_path': 'versae/roberta-base-ncc', 'model_type': 'roberta', 'config_name': './', 'tokenizer_name': './', 'cache_dir': None, 'use_fast_tokenizer': True, 'dtype': 'bfloat16'}
2022-01-20 15:14:32,039 INFO MainThread:2612942 [wandb_run.py:_config_callback():956] config_cb None None {'dataset_name': 'NbAiLab/NCC', 'dataset_config_name': None, 'train_file': None, 'validation_file': None, 'train_ref_file': None, 'validation_ref_file': None, 'overwrite_cache': False, 'validation_split_percentage': 5, 'max_seq_length': 512, 'preprocessing_num_workers': None, 'mlm_probability': 0.15, 'pad_to_max_length': True, 'line_by_line': False}
2022-01-20 15:14:32,059 INFO MainThread:2612942 [wandb_run.py:_tensorboard_callback():1029] tensorboard callback: ., None
wandb/run-20220120_151428-zjqubvsf/run-zjqubvsf.wandb
ADDED
Binary file (649 kB).