cmd/gc: add //go:nowritebarrier to diagnose unintended write barriers

//go:nowritebarrier can only be used in package runtime.
It does not disable write barriers; it is an assertion, checked
by the compiler, that the following function needs no write
barriers.

Change-Id: Id7978b779b66dc1feea39ee6bda9fd4d80280b7c
Reviewed-on: https://go-review.googlesource.com/1224
Reviewed-by: Rick Hudson <rlh@golang.org>
Russ Cox 2014-12-09 10:15:18 -05:00
commit 557a61d270 (parent 2fa657c5d6)
7 changed files with 209 additions and 138 deletions


@ -271,6 +271,7 @@ struct Node
uchar noescape; // func arguments do not escape
uchar nosplit; // func should not execute on separate stack
uchar builtin; // built-in name, like len or close
uchar nowritebarrier; // emit compiler error instead of write barrier
uchar walkdef;
uchar typecheck;
uchar local;
@ -987,6 +988,7 @@ EXTERN int flag_race;
EXTERN int flag_largemodel;
EXTERN int noescape;
EXTERN int nosplit;
EXTERN int nowritebarrier;
EXTERN int debuglive;
EXTERN Link* ctxt;


@ -1319,6 +1319,7 @@ xfndcl:
$$->endlineno = lineno;
$$->noescape = noescape;
$$->nosplit = nosplit;
$$->nowritebarrier = nowritebarrier;
funcbody($$);
}
@ -1502,6 +1503,7 @@ xdcl_list:
nointerface = 0;
noescape = 0;
nosplit = 0;
nowritebarrier = 0;
}
vardcl_list:


@ -1655,6 +1655,12 @@ go:
nosplit = 1;
goto out;
}
if(strcmp(lexbuf, "go:nowritebarrier") == 0) {
if(!compiling_runtime)
yyerror("//go:nowritebarrier only allowed in runtime");
nowritebarrier = 1;
goto out;
}
out:
return c;


@ -2010,6 +2010,8 @@ applywritebarrier(Node *n, NodeList **init)
char name[32];
if(n->left && n->right && needwritebarrier(n->left, n->right)) {
if(curfn && curfn->nowritebarrier)
yyerror("write barrier prohibited");
t = n->left->type;
l = nod(OADDR, n->left, N);
l->etype = 1; // addr does not escape


@ -678,22 +678,22 @@ static const yytype_uint16 yyrline[] =
1186, 1187, 1188, 1194, 1195, 1196, 1197, 1203, 1204, 1205,
1206, 1207, 1213, 1214, 1217, 1220, 1221, 1222, 1223, 1224,
1227, 1228, 1241, 1245, 1250, 1255, 1260, 1264, 1265, 1268,
1274, 1281, 1287, 1294, 1300, 1311, 1326, 1355, 1393, 1418,
1436, 1445, 1448, 1456, 1460, 1464, 1471, 1477, 1482, 1494,
1497, 1508, 1509, 1515, 1516, 1522, 1526, 1532, 1533, 1539,
1543, 1549, 1572, 1577, 1583, 1589, 1596, 1605, 1614, 1629,
1635, 1640, 1644, 1651, 1664, 1665, 1671, 1677, 1680, 1684,
1690, 1693, 1702, 1705, 1706, 1710, 1711, 1717, 1718, 1719,
1720, 1721, 1723, 1722, 1737, 1743, 1747, 1751, 1755, 1759,
1764, 1783, 1789, 1797, 1801, 1807, 1811, 1817, 1821, 1827,
1831, 1840, 1844, 1848, 1852, 1858, 1861, 1869, 1870, 1872,
1873, 1876, 1879, 1882, 1885, 1888, 1891, 1894, 1897, 1900,
1903, 1906, 1909, 1912, 1915, 1921, 1925, 1929, 1933, 1937,
1941, 1961, 1968, 1979, 1980, 1981, 1984, 1985, 1988, 1992,
2002, 2006, 2010, 2014, 2018, 2022, 2026, 2032, 2038, 2046,
2054, 2060, 2067, 2083, 2105, 2109, 2115, 2118, 2121, 2125,
2135, 2139, 2158, 2166, 2167, 2179, 2180, 2183, 2187, 2193,
2197, 2203, 2207
1274, 1281, 1287, 1294, 1300, 1311, 1327, 1356, 1394, 1419,
1437, 1446, 1449, 1457, 1461, 1465, 1472, 1478, 1483, 1495,
1498, 1510, 1511, 1517, 1518, 1524, 1528, 1534, 1535, 1541,
1545, 1551, 1574, 1579, 1585, 1591, 1598, 1607, 1616, 1631,
1637, 1642, 1646, 1653, 1666, 1667, 1673, 1679, 1682, 1686,
1692, 1695, 1704, 1707, 1708, 1712, 1713, 1719, 1720, 1721,
1722, 1723, 1725, 1724, 1739, 1745, 1749, 1753, 1757, 1761,
1766, 1785, 1791, 1799, 1803, 1809, 1813, 1819, 1823, 1829,
1833, 1842, 1846, 1850, 1854, 1860, 1863, 1871, 1872, 1874,
1875, 1878, 1881, 1884, 1887, 1890, 1893, 1896, 1899, 1902,
1905, 1908, 1911, 1914, 1917, 1923, 1927, 1931, 1935, 1939,
1943, 1963, 1970, 1981, 1982, 1983, 1986, 1987, 1990, 1994,
2004, 2008, 2012, 2016, 2020, 2024, 2028, 2034, 2040, 2048,
2056, 2062, 2069, 2085, 2107, 2111, 2117, 2120, 2123, 2127,
2137, 2141, 2160, 2168, 2169, 2181, 2182, 2185, 2189, 2195,
2199, 2205, 2209
};
#endif
@ -3825,12 +3825,13 @@ yyreduce:
(yyval.node)->endlineno = lineno;
(yyval.node)->noescape = noescape;
(yyval.node)->nosplit = nosplit;
(yyval.node)->nowritebarrier = nowritebarrier;
funcbody((yyval.node));
}
break;
case 206:
#line 1327 "go.y"
#line 1328 "go.y"
{
Node *t;
@ -3862,7 +3863,7 @@ yyreduce:
break;
case 207:
#line 1356 "go.y"
#line 1357 "go.y"
{
Node *rcvr, *t;
@ -3901,7 +3902,7 @@ yyreduce:
break;
case 208:
#line 1394 "go.y"
#line 1395 "go.y"
{
Sym *s;
Type *t;
@ -3929,7 +3930,7 @@ yyreduce:
break;
case 209:
#line 1419 "go.y"
#line 1420 "go.y"
{
(yyval.node) = methodname1(newname((yyvsp[(4) - (8)].sym)), (yyvsp[(2) - (8)].list)->n->right);
(yyval.node)->type = functype((yyvsp[(2) - (8)].list)->n, (yyvsp[(6) - (8)].list), (yyvsp[(8) - (8)].list));
@ -3948,7 +3949,7 @@ yyreduce:
break;
case 210:
#line 1437 "go.y"
#line 1438 "go.y"
{
(yyvsp[(3) - (5)].list) = checkarglist((yyvsp[(3) - (5)].list), 1);
(yyval.node) = nod(OTFUNC, N, N);
@ -3958,14 +3959,14 @@ yyreduce:
break;
case 211:
#line 1445 "go.y"
#line 1446 "go.y"
{
(yyval.list) = nil;
}
break;
case 212:
#line 1449 "go.y"
#line 1450 "go.y"
{
(yyval.list) = (yyvsp[(2) - (3)].list);
if((yyval.list) == nil)
@ -3974,21 +3975,21 @@ yyreduce:
break;
case 213:
#line 1457 "go.y"
#line 1458 "go.y"
{
(yyval.list) = nil;
}
break;
case 214:
#line 1461 "go.y"
#line 1462 "go.y"
{
(yyval.list) = list1(nod(ODCLFIELD, N, (yyvsp[(1) - (1)].node)));
}
break;
case 215:
#line 1465 "go.y"
#line 1466 "go.y"
{
(yyvsp[(2) - (3)].list) = checkarglist((yyvsp[(2) - (3)].list), 0);
(yyval.list) = (yyvsp[(2) - (3)].list);
@ -3996,14 +3997,14 @@ yyreduce:
break;
case 216:
#line 1472 "go.y"
#line 1473 "go.y"
{
closurehdr((yyvsp[(1) - (1)].node));
}
break;
case 217:
#line 1478 "go.y"
#line 1479 "go.y"
{
(yyval.node) = closurebody((yyvsp[(3) - (4)].list));
fixlbrace((yyvsp[(2) - (4)].i));
@ -4011,21 +4012,21 @@ yyreduce:
break;
case 218:
#line 1483 "go.y"
#line 1484 "go.y"
{
(yyval.node) = closurebody(nil);
}
break;
case 219:
#line 1494 "go.y"
#line 1495 "go.y"
{
(yyval.list) = nil;
}
break;
case 220:
#line 1498 "go.y"
#line 1499 "go.y"
{
(yyval.list) = concat((yyvsp[(1) - (3)].list), (yyvsp[(2) - (3)].list));
if(nsyntaxerrors == 0)
@ -4033,60 +4034,61 @@ yyreduce:
nointerface = 0;
noescape = 0;
nosplit = 0;
nowritebarrier = 0;
}
break;
case 222:
#line 1510 "go.y"
#line 1512 "go.y"
{
(yyval.list) = concat((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].list));
}
break;
case 224:
#line 1517 "go.y"
#line 1519 "go.y"
{
(yyval.list) = concat((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].list));
}
break;
case 225:
#line 1523 "go.y"
#line 1525 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 226:
#line 1527 "go.y"
#line 1529 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 228:
#line 1534 "go.y"
#line 1536 "go.y"
{
(yyval.list) = concat((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].list));
}
break;
case 229:
#line 1540 "go.y"
#line 1542 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 230:
#line 1544 "go.y"
#line 1546 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 231:
#line 1550 "go.y"
#line 1552 "go.y"
{
NodeList *l;
@ -4112,7 +4114,7 @@ yyreduce:
break;
case 232:
#line 1573 "go.y"
#line 1575 "go.y"
{
(yyvsp[(1) - (2)].node)->val = (yyvsp[(2) - (2)].val);
(yyval.list) = list1((yyvsp[(1) - (2)].node));
@ -4120,7 +4122,7 @@ yyreduce:
break;
case 233:
#line 1578 "go.y"
#line 1580 "go.y"
{
(yyvsp[(2) - (4)].node)->val = (yyvsp[(4) - (4)].val);
(yyval.list) = list1((yyvsp[(2) - (4)].node));
@ -4129,7 +4131,7 @@ yyreduce:
break;
case 234:
#line 1584 "go.y"
#line 1586 "go.y"
{
(yyvsp[(2) - (3)].node)->right = nod(OIND, (yyvsp[(2) - (3)].node)->right, N);
(yyvsp[(2) - (3)].node)->val = (yyvsp[(3) - (3)].val);
@ -4138,7 +4140,7 @@ yyreduce:
break;
case 235:
#line 1590 "go.y"
#line 1592 "go.y"
{
(yyvsp[(3) - (5)].node)->right = nod(OIND, (yyvsp[(3) - (5)].node)->right, N);
(yyvsp[(3) - (5)].node)->val = (yyvsp[(5) - (5)].val);
@ -4148,7 +4150,7 @@ yyreduce:
break;
case 236:
#line 1597 "go.y"
#line 1599 "go.y"
{
(yyvsp[(3) - (5)].node)->right = nod(OIND, (yyvsp[(3) - (5)].node)->right, N);
(yyvsp[(3) - (5)].node)->val = (yyvsp[(5) - (5)].val);
@ -4158,7 +4160,7 @@ yyreduce:
break;
case 237:
#line 1606 "go.y"
#line 1608 "go.y"
{
Node *n;
@ -4170,7 +4172,7 @@ yyreduce:
break;
case 238:
#line 1615 "go.y"
#line 1617 "go.y"
{
Pkg *pkg;
@ -4186,14 +4188,14 @@ yyreduce:
break;
case 239:
#line 1630 "go.y"
#line 1632 "go.y"
{
(yyval.node) = embedded((yyvsp[(1) - (1)].sym), localpkg);
}
break;
case 240:
#line 1636 "go.y"
#line 1638 "go.y"
{
(yyval.node) = nod(ODCLFIELD, (yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].node));
ifacedcl((yyval.node));
@ -4201,14 +4203,14 @@ yyreduce:
break;
case 241:
#line 1641 "go.y"
#line 1643 "go.y"
{
(yyval.node) = nod(ODCLFIELD, N, oldname((yyvsp[(1) - (1)].sym)));
}
break;
case 242:
#line 1645 "go.y"
#line 1647 "go.y"
{
(yyval.node) = nod(ODCLFIELD, N, oldname((yyvsp[(2) - (3)].sym)));
yyerror("cannot parenthesize embedded type");
@ -4216,7 +4218,7 @@ yyreduce:
break;
case 243:
#line 1652 "go.y"
#line 1654 "go.y"
{
// without func keyword
(yyvsp[(2) - (4)].list) = checkarglist((yyvsp[(2) - (4)].list), 1);
@ -4227,7 +4229,7 @@ yyreduce:
break;
case 245:
#line 1666 "go.y"
#line 1668 "go.y"
{
(yyval.node) = nod(ONONAME, N, N);
(yyval.node)->sym = (yyvsp[(1) - (2)].sym);
@ -4236,7 +4238,7 @@ yyreduce:
break;
case 246:
#line 1672 "go.y"
#line 1674 "go.y"
{
(yyval.node) = nod(ONONAME, N, N);
(yyval.node)->sym = (yyvsp[(1) - (2)].sym);
@ -4245,56 +4247,56 @@ yyreduce:
break;
case 248:
#line 1681 "go.y"
#line 1683 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 249:
#line 1685 "go.y"
#line 1687 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 250:
#line 1690 "go.y"
#line 1692 "go.y"
{
(yyval.list) = nil;
}
break;
case 251:
#line 1694 "go.y"
#line 1696 "go.y"
{
(yyval.list) = (yyvsp[(1) - (2)].list);
}
break;
case 252:
#line 1702 "go.y"
#line 1704 "go.y"
{
(yyval.node) = N;
}
break;
case 254:
#line 1707 "go.y"
#line 1709 "go.y"
{
(yyval.node) = liststmt((yyvsp[(1) - (1)].list));
}
break;
case 256:
#line 1712 "go.y"
#line 1714 "go.y"
{
(yyval.node) = N;
}
break;
case 262:
#line 1723 "go.y"
#line 1725 "go.y"
{
(yyvsp[(1) - (2)].node) = nod(OLABEL, (yyvsp[(1) - (2)].node), N);
(yyvsp[(1) - (2)].node)->sym = dclstack; // context, for goto restrictions
@ -4302,7 +4304,7 @@ yyreduce:
break;
case 263:
#line 1728 "go.y"
#line 1730 "go.y"
{
NodeList *l;
@ -4315,7 +4317,7 @@ yyreduce:
break;
case 264:
#line 1738 "go.y"
#line 1740 "go.y"
{
// will be converted to OFALL
(yyval.node) = nod(OXFALL, N, N);
@ -4324,35 +4326,35 @@ yyreduce:
break;
case 265:
#line 1744 "go.y"
#line 1746 "go.y"
{
(yyval.node) = nod(OBREAK, (yyvsp[(2) - (2)].node), N);
}
break;
case 266:
#line 1748 "go.y"
#line 1750 "go.y"
{
(yyval.node) = nod(OCONTINUE, (yyvsp[(2) - (2)].node), N);
}
break;
case 267:
#line 1752 "go.y"
#line 1754 "go.y"
{
(yyval.node) = nod(OPROC, (yyvsp[(2) - (2)].node), N);
}
break;
case 268:
#line 1756 "go.y"
#line 1758 "go.y"
{
(yyval.node) = nod(ODEFER, (yyvsp[(2) - (2)].node), N);
}
break;
case 269:
#line 1760 "go.y"
#line 1762 "go.y"
{
(yyval.node) = nod(OGOTO, (yyvsp[(2) - (2)].node), N);
(yyval.node)->sym = dclstack; // context, for goto restrictions
@ -4360,7 +4362,7 @@ yyreduce:
break;
case 270:
#line 1765 "go.y"
#line 1767 "go.y"
{
(yyval.node) = nod(ORETURN, N, N);
(yyval.node)->list = (yyvsp[(2) - (2)].list);
@ -4380,7 +4382,7 @@ yyreduce:
break;
case 271:
#line 1784 "go.y"
#line 1786 "go.y"
{
(yyval.list) = nil;
if((yyvsp[(1) - (1)].node) != N)
@ -4389,7 +4391,7 @@ yyreduce:
break;
case 272:
#line 1790 "go.y"
#line 1792 "go.y"
{
(yyval.list) = (yyvsp[(1) - (3)].list);
if((yyvsp[(3) - (3)].node) != N)
@ -4398,189 +4400,189 @@ yyreduce:
break;
case 273:
#line 1798 "go.y"
#line 1800 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 274:
#line 1802 "go.y"
#line 1804 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 275:
#line 1808 "go.y"
#line 1810 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 276:
#line 1812 "go.y"
#line 1814 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 277:
#line 1818 "go.y"
#line 1820 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 278:
#line 1822 "go.y"
#line 1824 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 279:
#line 1828 "go.y"
#line 1830 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 280:
#line 1832 "go.y"
#line 1834 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 281:
#line 1841 "go.y"
#line 1843 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 282:
#line 1845 "go.y"
#line 1847 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 283:
#line 1849 "go.y"
#line 1851 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 284:
#line 1853 "go.y"
#line 1855 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 285:
#line 1858 "go.y"
#line 1860 "go.y"
{
(yyval.list) = nil;
}
break;
case 286:
#line 1862 "go.y"
#line 1864 "go.y"
{
(yyval.list) = (yyvsp[(1) - (2)].list);
}
break;
case 291:
#line 1876 "go.y"
#line 1878 "go.y"
{
(yyval.node) = N;
}
break;
case 293:
#line 1882 "go.y"
#line 1884 "go.y"
{
(yyval.list) = nil;
}
break;
case 295:
#line 1888 "go.y"
#line 1890 "go.y"
{
(yyval.node) = N;
}
break;
case 297:
#line 1894 "go.y"
#line 1896 "go.y"
{
(yyval.list) = nil;
}
break;
case 299:
#line 1900 "go.y"
#line 1902 "go.y"
{
(yyval.list) = nil;
}
break;
case 301:
#line 1906 "go.y"
#line 1908 "go.y"
{
(yyval.list) = nil;
}
break;
case 303:
#line 1912 "go.y"
#line 1914 "go.y"
{
(yyval.val).ctype = CTxxx;
}
break;
case 305:
#line 1922 "go.y"
#line 1924 "go.y"
{
importimport((yyvsp[(2) - (4)].sym), (yyvsp[(3) - (4)].val).u.sval);
}
break;
case 306:
#line 1926 "go.y"
#line 1928 "go.y"
{
importvar((yyvsp[(2) - (4)].sym), (yyvsp[(3) - (4)].type));
}
break;
case 307:
#line 1930 "go.y"
#line 1932 "go.y"
{
importconst((yyvsp[(2) - (5)].sym), types[TIDEAL], (yyvsp[(4) - (5)].node));
}
break;
case 308:
#line 1934 "go.y"
#line 1936 "go.y"
{
importconst((yyvsp[(2) - (6)].sym), (yyvsp[(3) - (6)].type), (yyvsp[(5) - (6)].node));
}
break;
case 309:
#line 1938 "go.y"
#line 1940 "go.y"
{
importtype((yyvsp[(2) - (4)].type), (yyvsp[(3) - (4)].type));
}
break;
case 310:
#line 1942 "go.y"
#line 1944 "go.y"
{
if((yyvsp[(2) - (4)].node) == N) {
dclcontext = PEXTERN; // since we skip the funcbody below
@ -4601,7 +4603,7 @@ yyreduce:
break;
case 311:
#line 1962 "go.y"
#line 1964 "go.y"
{
(yyval.sym) = (yyvsp[(1) - (1)].sym);
structpkg = (yyval.sym)->pkg;
@ -4609,7 +4611,7 @@ yyreduce:
break;
case 312:
#line 1969 "go.y"
#line 1971 "go.y"
{
(yyval.type) = pkgtype((yyvsp[(1) - (1)].sym));
importsym((yyvsp[(1) - (1)].sym), OTYPE);
@ -4617,14 +4619,14 @@ yyreduce:
break;
case 318:
#line 1989 "go.y"
#line 1991 "go.y"
{
(yyval.type) = pkgtype((yyvsp[(1) - (1)].sym));
}
break;
case 319:
#line 1993 "go.y"
#line 1995 "go.y"
{
// predefined name like uint8
(yyvsp[(1) - (1)].sym) = pkglookup((yyvsp[(1) - (1)].sym)->name, builtinpkg);
@ -4637,49 +4639,49 @@ yyreduce:
break;
case 320:
#line 2003 "go.y"
#line 2005 "go.y"
{
(yyval.type) = aindex(N, (yyvsp[(3) - (3)].type));
}
break;
case 321:
#line 2007 "go.y"
#line 2009 "go.y"
{
(yyval.type) = aindex(nodlit((yyvsp[(2) - (4)].val)), (yyvsp[(4) - (4)].type));
}
break;
case 322:
#line 2011 "go.y"
#line 2013 "go.y"
{
(yyval.type) = maptype((yyvsp[(3) - (5)].type), (yyvsp[(5) - (5)].type));
}
break;
case 323:
#line 2015 "go.y"
#line 2017 "go.y"
{
(yyval.type) = tostruct((yyvsp[(3) - (4)].list));
}
break;
case 324:
#line 2019 "go.y"
#line 2021 "go.y"
{
(yyval.type) = tointerface((yyvsp[(3) - (4)].list));
}
break;
case 325:
#line 2023 "go.y"
#line 2025 "go.y"
{
(yyval.type) = ptrto((yyvsp[(2) - (2)].type));
}
break;
case 326:
#line 2027 "go.y"
#line 2029 "go.y"
{
(yyval.type) = typ(TCHAN);
(yyval.type)->type = (yyvsp[(2) - (2)].type);
@ -4688,7 +4690,7 @@ yyreduce:
break;
case 327:
#line 2033 "go.y"
#line 2035 "go.y"
{
(yyval.type) = typ(TCHAN);
(yyval.type)->type = (yyvsp[(3) - (4)].type);
@ -4697,7 +4699,7 @@ yyreduce:
break;
case 328:
#line 2039 "go.y"
#line 2041 "go.y"
{
(yyval.type) = typ(TCHAN);
(yyval.type)->type = (yyvsp[(3) - (3)].type);
@ -4706,7 +4708,7 @@ yyreduce:
break;
case 329:
#line 2047 "go.y"
#line 2049 "go.y"
{
(yyval.type) = typ(TCHAN);
(yyval.type)->type = (yyvsp[(3) - (3)].type);
@ -4715,14 +4717,14 @@ yyreduce:
break;
case 330:
#line 2055 "go.y"
#line 2057 "go.y"
{
(yyval.type) = functype(nil, (yyvsp[(3) - (5)].list), (yyvsp[(5) - (5)].list));
}
break;
case 331:
#line 2061 "go.y"
#line 2063 "go.y"
{
(yyval.node) = nod(ODCLFIELD, N, typenod((yyvsp[(2) - (3)].type)));
if((yyvsp[(1) - (3)].sym))
@ -4732,7 +4734,7 @@ yyreduce:
break;
case 332:
#line 2068 "go.y"
#line 2070 "go.y"
{
Type *t;
@ -4749,7 +4751,7 @@ yyreduce:
break;
case 333:
#line 2084 "go.y"
#line 2086 "go.y"
{
Sym *s;
Pkg *p;
@ -4772,49 +4774,49 @@ yyreduce:
break;
case 334:
#line 2106 "go.y"
#line 2108 "go.y"
{
(yyval.node) = nod(ODCLFIELD, newname((yyvsp[(1) - (5)].sym)), typenod(functype(fakethis(), (yyvsp[(3) - (5)].list), (yyvsp[(5) - (5)].list))));
}
break;
case 335:
#line 2110 "go.y"
#line 2112 "go.y"
{
(yyval.node) = nod(ODCLFIELD, N, typenod((yyvsp[(1) - (1)].type)));
}
break;
case 336:
#line 2115 "go.y"
#line 2117 "go.y"
{
(yyval.list) = nil;
}
break;
case 338:
#line 2122 "go.y"
#line 2124 "go.y"
{
(yyval.list) = (yyvsp[(2) - (3)].list);
}
break;
case 339:
#line 2126 "go.y"
#line 2128 "go.y"
{
(yyval.list) = list1(nod(ODCLFIELD, N, typenod((yyvsp[(1) - (1)].type))));
}
break;
case 340:
#line 2136 "go.y"
#line 2138 "go.y"
{
(yyval.node) = nodlit((yyvsp[(1) - (1)].val));
}
break;
case 341:
#line 2140 "go.y"
#line 2142 "go.y"
{
(yyval.node) = nodlit((yyvsp[(2) - (2)].val));
switch((yyval.node)->val.ctype){
@ -4836,7 +4838,7 @@ yyreduce:
break;
case 342:
#line 2159 "go.y"
#line 2161 "go.y"
{
(yyval.node) = oldname(pkglookup((yyvsp[(1) - (1)].sym)->name, builtinpkg));
if((yyval.node)->op != OLITERAL)
@ -4845,7 +4847,7 @@ yyreduce:
break;
case 344:
#line 2168 "go.y"
#line 2170 "go.y"
{
if((yyvsp[(2) - (5)].node)->val.ctype == CTRUNE && (yyvsp[(4) - (5)].node)->val.ctype == CTINT) {
(yyval.node) = (yyvsp[(2) - (5)].node);
@ -4859,42 +4861,42 @@ yyreduce:
break;
case 347:
#line 2184 "go.y"
#line 2186 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 348:
#line 2188 "go.y"
#line 2190 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 349:
#line 2194 "go.y"
#line 2196 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 350:
#line 2198 "go.y"
#line 2200 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
break;
case 351:
#line 2204 "go.y"
#line 2206 "go.y"
{
(yyval.list) = list1((yyvsp[(1) - (1)].node));
}
break;
case 352:
#line 2208 "go.y"
#line 2210 "go.y"
{
(yyval.list) = list((yyvsp[(1) - (3)].list), (yyvsp[(3) - (3)].node));
}
@ -4902,7 +4904,7 @@ yyreduce:
/* Line 1267 of yacc.c. */
#line 4907 "y.tab.c"
#line 4909 "y.tab.c"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
@ -5116,7 +5118,7 @@ yyreturn:
}
#line 2212 "go.y"
#line 2214 "go.y"
static void


@ -18,7 +18,7 @@
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal-quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
@ -43,7 +43,7 @@
// 9. Wait for all P's to acknowledge phase change.
// 10. Malloc now allocates black objects, so number of unmarked reachable objects
// monotonically decreases.
// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet
// reachable objects.
// 12. When GC completes a full cycle over P's and discovers no new grey
// objects (which means all reachable objects are marked), set phase = GCsweep.
@ -96,7 +96,7 @@
// that many pages into heap. Together these two measures ensure that we don't surpass
// target next_gc value by a large margin. There is an exception: if a goroutine sweeps
// and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
// but there can still be other one-page unswept spans which could be combined into a
// two-page span.
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
@ -212,6 +212,7 @@ var work workdata
var weak_cgo_allocate byte
// Is _cgo_allocate linked into the binary?
//go:nowritebarrier
func have_cgo_allocate() bool {
return &weak_cgo_allocate != nil
}
@ -242,6 +243,7 @@ var (
// Reports whether address b is in the known heap. If it doesn't have a valid gcmap
// it returns false. For example, pointers into stacks will return false.
//go:nowritebarrier
func inheap(b uintptr) bool {
if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
return false
@ -259,9 +261,10 @@ func inheap(b uintptr) bool {
// Given an address in the heap, return the relevant byte from the gcmap. This routine
// can be used on addresses to the start of an object or to the interior of an object.
//go:nowritebarrier
func slottombits(obj uintptr, mbits *markbits) {
off := (obj&^(ptrSize-1) - mheap_.arena_start) / ptrSize
mbits.bitp = (*byte)(unsafe.Pointer(mheap_.arena_start - off/wordsPerBitmapByte - 1))
*(*uintptr)(unsafe.Pointer(&mbits.bitp)) = mheap_.arena_start - off/wordsPerBitmapByte - 1
mbits.shift = off % wordsPerBitmapByte * gcBits
mbits.xbits = *mbits.bitp
mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
@ -273,6 +276,7 @@ func slottombits(obj uintptr, mbits *markbits) {
// Set mbits to the associated bits from the bit map.
// If b is not a valid heap object return 0 and
// undefined values in mbits.
//go:nowritebarrier
func objectstart(b uintptr, mbits *markbits) uintptr {
obj := b &^ (ptrSize - 1)
for {
@ -331,6 +335,7 @@ func objectstart(b uintptr, mbits *markbits) uintptr {
// speed is not critical at this point.
var andlock mutex
//go:nowritebarrier
func atomicand8(src *byte, val byte) {
lock(&andlock)
*src &= val
@ -338,6 +343,7 @@ func atomicand8(src *byte, val byte) {
}
// Mark using the checkmark scheme.
//go:nowritebarrier
func docheckmark(mbits *markbits) {
// xor 01 moves 01(scalar unmarked) to 00(scalar marked)
// and 10(pointer unmarked) to 11(pointer marked)
@ -354,6 +360,7 @@ func docheckmark(mbits *markbits) {
}
// In the default scheme, reports whether mbits refers to a marked object.
//go:nowritebarrier
func ismarked(mbits *markbits) bool {
if mbits.bits&bitBoundary != bitBoundary {
gothrow("ismarked: bits should have boundary bit set")
@ -362,6 +369,7 @@ func ismarked(mbits *markbits) bool {
}
// In the checkmark scheme, reports whether mbits refers to a marked object.
//go:nowritebarrier
func ischeckmarked(mbits *markbits) bool {
if mbits.bits&bitBoundary != bitBoundary {
gothrow("ischeckmarked: bits should have boundary bit set")
@ -370,6 +378,7 @@ func ischeckmarked(mbits *markbits) bool {
}
// When in GCmarkterminate phase we allocate black.
//go:nowritebarrier
func gcmarknewobject_m(obj uintptr) {
if gcphase != _GCmarktermination {
gothrow("marking new object while not in mark termination phase")
@ -399,6 +408,7 @@ func gcmarknewobject_m(obj uintptr) {
// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into workbuf.
// Return possibly new workbuf to use.
//go:nowritebarrier
func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
// obj should be start of allocation, and so must be at least pointer-aligned.
if obj&(ptrSize-1) != 0 {
@ -480,6 +490,7 @@ func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
// If ptrmask == nil, the GC bitmap should be consulted.
// In this case, n may be an overestimate of the size; the GC bitmap
// must also be used to make sure the scan stops at the end of b.
//go:nowritebarrier
func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {
arena_start := mheap_.arena_start
arena_used := mheap_.arena_used
@ -563,6 +574,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {
// Otherwise it traverses some fraction of the pointers it found in b, recursively.
// As a special case, scanblock(0, 0, nil) means to scan previously queued work,
// stopping only when no work is left in the system.
//go:nowritebarrier
func scanblock(b, n uintptr, ptrmask *uint8) {
wbuf := getpartialorempty()
if b != 0 {
@ -618,6 +630,7 @@ func scanblock(b, n uintptr, ptrmask *uint8) {
}
}
//go:nowritebarrier
func markroot(desc *parfor, i uint32) {
// Note: if you add a case here, please also update heapdump.c:dumproots.
switch i {
@ -718,6 +731,7 @@ func markroot(desc *parfor, i uint32) {
// Get an empty work buffer off the work.empty list,
// allocating new buffers as needed.
//go:nowritebarrier
func getempty(b *workbuf) *workbuf {
if b != nil {
putfull(b)
@ -738,6 +752,7 @@ func getempty(b *workbuf) *workbuf {
return b
}
//go:nowritebarrier
func putempty(b *workbuf) {
if b.nobj != 0 {
gothrow("putempty: b->nobj not 0")
@ -745,6 +760,7 @@ func putempty(b *workbuf) {
lfstackpush(&work.empty, &b.node)
}
//go:nowritebarrier
func putfull(b *workbuf) {
if b.nobj <= 0 {
gothrow("putfull: b->nobj <= 0")
@ -754,6 +770,7 @@ func putfull(b *workbuf) {
// Get a partially empty work buffer;
// if none are available get an empty one.
//go:nowritebarrier
func getpartialorempty() *workbuf {
b := (*workbuf)(lfstackpop(&work.partial))
if b == nil {
@ -762,6 +779,7 @@ func getpartialorempty() *workbuf {
return b
}
//go:nowritebarrier
func putpartial(b *workbuf) {
if b.nobj == 0 {
lfstackpush(&work.empty, &b.node)
@ -787,6 +805,7 @@ func putpartial(b *workbuf) {
// looking for work and thus not capable of creating new work.
// This is in fact the termination condition for the STW mark
// phase.
//go:nowritebarrier
func getfull(b *workbuf) *workbuf {
if b != nil {
putempty(b)
@ -830,6 +849,7 @@ func getfull(b *workbuf) *workbuf {
}
}
//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
// Make new buffer with half of b's pointers.
b1 := getempty(nil)
@ -846,6 +866,7 @@ func handoff(b *workbuf) *workbuf {
return b1
}
//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
if n < 0 || n >= stkmap.n {
gothrow("stackmapdata: index out of range")
@ -854,6 +875,7 @@ func stackmapdata(stkmap *stackmap, n int32) bitvector {
}
// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
f := frame.fn
@ -925,6 +947,7 @@ func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
return true
}
//go:nowritebarrier
func scanstack(gp *g) {
if readgstatus(gp)&_Gscan == 0 {
@ -963,6 +986,7 @@ func scanstack(gp *g) {
// The slot is grey if its mark bit is set and it is enqueued to be scanned.
// The slot is black if it has already been scanned.
// It is white if it has a valid mark bit and the bit is not set.
//go:nowritebarrier
func shaded(slot uintptr) bool {
if !inheap(slot) { // non-heap slots considered grey
return true
@ -983,6 +1007,7 @@ func shaded(slot uintptr) bool {
// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
//go:nowritebarrier
func shade(b uintptr) {
if !inheap(b) {
gothrow("shade: passed an address not in the heap")
@ -1033,6 +1058,7 @@ func shade(b uintptr) {
// answer is yes without inserting a memory barrier between the st and the ld.
// These barriers are expensive so we have decided that we will
// always grey the ptr object regardless of the slot's color.
//go:nowritebarrier
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
switch gcphase {
default:
@ -1050,6 +1076,7 @@ func gcmarkwb_m(slot *uintptr, ptr uintptr) {
// The gp has been moved to a GC safepoint. GC phase specific
// work is done here.
//go:nowritebarrier
func gcphasework(gp *g) {
switch gcphase {
default:
@ -1095,6 +1122,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
lock(&finlock)
if finq == nil || finq.cnt == int32(len(finq.fin)) {
if finc == nil {
// Note: write barrier here, assigning to finc, but should be okay.
finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
finc.alllink = allfin
allfin = finc
@ -1131,6 +1159,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
unlock(&finlock)
}
//go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
for fb := allfin; fb != nil; fb = fb.alllink {
for i := int32(0); i < fb.cnt; i++ {
@ -1141,6 +1170,7 @@ func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrt
}
// Returns only when span s has been swept.
//go:nowritebarrier
func mSpan_EnsureSwept(s *mspan) {
// Caller must disable preemption.
// Otherwise when this function returns the span can become unswept again
@ -1170,6 +1200,7 @@ func mSpan_EnsureSwept(s *mspan) {
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in MCentral lists;
// caller takes care of it.
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
if checkmark {
gothrow("MSpan_Sweep: checkmark only runs in STW and after the sweep")
@ -1380,6 +1411,7 @@ var sweep sweepdata
// sweeps one span
// returns number of pages returned to heap, or ^uintptr(0) if there is nothing to sweep
//go:nowritebarrier
func sweepone() uintptr {
_g_ := getg()
@ -1411,6 +1443,7 @@ func sweepone() uintptr {
}
}
//go:nowritebarrier
func gosweepone() uintptr {
var ret uintptr
systemstack(func() {
@ -1419,10 +1452,12 @@ func gosweepone() uintptr {
return ret
}
//go:nowritebarrier
func gosweepdone() bool {
return mheap_.sweepdone != 0
}
//go:nowritebarrier
func gchelper() {
_g_ := getg()
_g_.m.traceback = 2
@ -1441,6 +1476,7 @@ func gchelper() {
_g_.m.traceback = 0
}
//go:nowritebarrier
func cachestats() {
for i := 0; ; i++ {
p := allp[i]
@ -1455,6 +1491,7 @@ func cachestats() {
}
}
//go:nowritebarrier
func flushallmcaches() {
for i := 0; ; i++ {
p := allp[i]
@ -1470,6 +1507,7 @@ func flushallmcaches() {
}
}
//go:nowritebarrier
func updatememstats(stats *gcstats) {
if stats != nil {
*stats = gcstats{}
@ -1560,6 +1598,7 @@ func gcinit() {
}
// Called from malloc.go using onM; stopping and starting the world is handled in the caller.
//go:nowritebarrier
func gc_m(start_time int64, eagersweep bool) {
_g_ := getg()
gp := _g_.m.curg
@ -1579,6 +1618,7 @@ func gc_m(start_time int64, eagersweep bool) {
// For the second case it is possible to restore the BitsDead pattern but since
// clearmark is a debug tool, performance has a lower priority than simplicity.
// The span is MSpanInUse and the world is stopped.
//go:nowritebarrier
func clearcheckmarkbitsspan(s *mspan) {
if s.state != _MSpanInUse {
print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
@ -1707,6 +1747,7 @@ func clearcheckmarkbitsspan(s *mspan) {
// BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
// This is a bit expensive but preserves the BitsDead encoding during the normal marking.
// BitsDead remains valid for every nibble except the ones with BitsBoundary set.
//go:nowritebarrier
func clearcheckmarkbits() {
for _, s := range work.spans {
if s.state == _MSpanInUse {
@ -1720,6 +1761,7 @@ func clearcheckmarkbits() {
// using the bitMarkedCheck bit instead of the
// bitMarked bit. If the marking encounters a
// bitMarked bit that is not set then we throw.
//go:nowritebarrier
func gccheckmark_m(startTime int64, eagersweep bool) {
if !gccheckmarkenable {
return
@ -1736,14 +1778,17 @@ func gccheckmark_m(startTime int64, eagersweep bool) {
clearcheckmarkbits()
}
//go:nowritebarrier
func gccheckmarkenable_m() {
gccheckmarkenable = true
}
//go:nowritebarrier
func gccheckmarkdisable_m() {
gccheckmarkenable = false
}
//go:nowritebarrier
func finishsweep_m() {
// The world is stopped so we should be able to complete the sweeps
// quickly.
@ -1764,6 +1809,7 @@ func finishsweep_m() {
// Scan all of the stacks, greying (or graying if in America) the referents
// but not blackening them since the mark write barrier isn't installed.
//go:nowritebarrier
func gcscan_m() {
_g_ := getg()
@ -1813,22 +1859,26 @@ func gcscan_m() {
}
// Mark all objects that are known about.
//go:nowritebarrier
func gcmark_m() {
scanblock(0, 0, nil)
}
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all goroutines see the new barrier.
//go:nowritebarrier
func gcinstallmarkwb_m() {
gcphase = _GCmark
}
// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
// all goroutines see the new barrier.
//go:nowritebarrier
func gcinstalloffwb_m() {
gcphase = _GCoff
}
//TODO go:nowritebarrier
func gc(start_time int64, eagersweep bool) {
if _DebugGCPtrs {
print("GC start\n")
@ -2100,6 +2150,7 @@ func wakefing() *g {
return res
}
//go:nowritebarrier
func addb(p *byte, n uintptr) *byte {
return (*byte)(add(unsafe.Pointer(p), n))
}
@ -2108,6 +2159,7 @@ func addb(p *byte, n uintptr) *byte {
// mask is where to store the result.
// ppos is a pointer to position in mask, in bits.
// sparse says to generate 4-bits per word mask for heap (2-bits for data/bss otherwise).
//go:nowritebarrier
func unrollgcprog1(maskp *byte, prog *byte, ppos *uintptr, inplace, sparse bool) *byte {
arena_start := mheap_.arena_start
pos := *ppos
@ -2219,6 +2271,7 @@ func unrollgcproginplace_m(v unsafe.Pointer, typ *_type, size, size0 uintptr) {
var unroll mutex
// Unrolls GC program in typ.gc[1] into typ.gc[0]
//go:nowritebarrier
func unrollgcprog_m(typ *_type) {
lock(&unroll)
mask := (*byte)(unsafe.Pointer(uintptr(typ.gc[0])))
@ -2243,6 +2296,7 @@ func unrollgcprog_m(typ *_type) {
// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
//go:nowritebarrier
func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
gothrow("markspan: bad pointer")
@ -2288,6 +2342,7 @@ func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
}
// unmark the span of memory at v of length n bytes.
//go:nowritebarrier
func unmarkspan(v, n uintptr) {
if v+n > mheap_.arena_used || v < mheap_.arena_start {
gothrow("markspan: bad pointer")
@ -2312,6 +2367,7 @@ func unmarkspan(v, n uintptr) {
memclr(unsafe.Pointer(b-n+1), n)
}
//go:nowritebarrier
func mHeap_MapBits(h *mheap) {
// Caller has added extra mappings to the arena.
// Add extra mappings of bitmap words as needed.


@ -1577,6 +1577,7 @@ func goexit0(gp *g) {
}
//go:nosplit
//go:nowritebarrier
func save(pc, sp uintptr) {
_g_ := getg()
@ -1585,7 +1586,7 @@ func save(pc, sp uintptr) {
_g_.sched.lr = 0
_g_.sched.ret = 0
_g_.sched.ctxt = nil
// write as uintptr to avoid write barrier, which will smash _g_.sched.
// _g_.sched.g = _g_, but avoid write barrier, which smashes _g_.sched
*(*uintptr)(unsafe.Pointer(&_g_.sched.g)) = uintptr(unsafe.Pointer(_g_))
}