[dev.unified] all: merge master (8e1e64c) into dev.unified

Conflicts:

- test/run.go
Conflicts in the known-fails list; also removed a test from the known-fails list because it now passes.

Merge List:

+ 2022-07-19 8e1e64c16a cmd/compile: fix mknode script
+ 2022-07-19 28be440d34 A+C: add Weizhi Yan
+ 2022-07-19 85a482fc24 runtime: revert to using the precomputed trigger for pacer calculations
+ 2022-07-19 ae7340ab68 CONTRIBUTORS: update for the Go 1.19 release
+ 2022-07-18 de8101d21b runtime: fix typos
+ 2022-07-18 967a3d985d cmd/compile: revert "remove -installsuffix flag"
+ 2022-07-18 c0c1bbde17 http: improve Get documentation
+ 2022-07-15 2aa473cc54 go/types, types2: correct alignment of atomic.Int64
+ 2022-07-15 4651ebf961 encoding/gob: s/TestIngoreDepthLimit/TestIgnoreDepthLimit/
+ 2022-07-14 dc00aed6de go/parser: skip TestParseDepthLimit for short tests
+ 2022-07-14 783ff7dfc4 encoding/xml: skip TestCVE202230633 for short tests
+ 2022-07-14 aa80228526 cmd/go/internal/modfetch: avoid duplicating path components in Git fetch errors
+ 2022-07-14 b9d5a25442 cmd/go: save zip sums for downloaded modules in 'go mod download' in a workspace
+ 2022-07-14 a906d3dd09 cmd/go: avoid re-enqueuing workspace dependencies with errors
+ 2022-07-14 266c70c263 doc/go1.19: add a release note for 'go list -json=SomeField'
+ 2022-07-13 558785a0a9 cmd/compile: remove -installsuffix flag
+ 2022-07-13 1355ea3045 cmd/compile: remove -importmap flag
+ 2022-07-13 f71f3d1b86 misc/cgo/testshared: run tests only in GOPATH mode
+ 2022-07-13 feada53661 misc/cgo/testcshared: don't rely on an erroneous install target in tests
+ 2022-07-13 c006b7ac27 runtime: clear timerModifiedEarliest when last timer is deleted
+ 2022-07-13 923740a8cc cmd/compile: fix type assert in dict pass
+ 2022-07-12 bf2ef26be3 cmd/go: in script tests, avoid checking non-main packages for staleness
+ 2022-07-12 5f5cae7200 cmd/go: avoid indexing GOROOT packages when the compiler is 'gccgo'
+ 2022-07-12 c2edb2c841 cmd/go: port TestIssue16471 to a script test and add verbose logging
+ 2022-07-12 9c2526e637 cmd/go/internal/modfetch/codehost: add missing newline in '# lock' log message
+ 2022-07-12 85486bcccb image/jpeg: increase TestLargeImageWithShortData timeout by an order of magnitude
+ 2022-07-12 27794c4d4a cmd/go/internal/modload: ignore disallowed errors when checking for updates
+ 2022-07-12 b2b8872c87 compress/gzip: fix stack exhaustion bug in Reader.Read
+ 2022-07-12 ac68c6c683 path/filepath: fix stack exhaustion in Glob
+ 2022-07-12 fa2d41d0ca io/fs: fix stack exhaustion in Glob
+ 2022-07-12 6fa37e98ea encoding/gob: add a depth limit for ignored fields
+ 2022-07-12 695be961d5 go/parser: limit recursion depth
+ 2022-07-12 08c46ed43d encoding/xml: use iterative Skip, rather than recursive
+ 2022-07-12 c4c1993fd2 encoding/xml: limit depth of nesting in unmarshal
+ 2022-07-12 913d05133c cmd/go: avoid spurious readdir during fsys.Walk
+ 2022-07-12 d3d7998756 net/http: clarify that MaxBytesReader returns *MaxBytesError
+ 2022-07-11 126c22a098 syscall: gofmt after CL 412114
+ 2022-07-11 123a6328b7 internal/trace: don't report regions on system goroutines
+ 2022-07-11 846490110a runtime/race: update amd64 syso images to avoid sse4
+ 2022-07-11 b75ad09cae cmd/trace: fix typo in web documentation
+ 2022-07-11 7510e597de cmd/go: make module index loading O(1)
+ 2022-07-11 b8bf820d5d cmd/nm: don't rely on an erroneous install target in tests
+ 2022-07-11 ad641e8521 misc/cgo/testcarchive: don't rely on an erroneous install target in tests
+ 2022-07-11 bf5898ef53 net/url: use EscapedPath for url.JoinPath
+ 2022-07-11 398dcd1cf0 database/sql: make TestTxContextWaitNoDiscard test more robust
+ 2022-07-11 f956941b0f cmd/go: use package index for std in load.loadPackageData
+ 2022-07-11 59ab6f351a net/http: remove Content-Encoding in writeNotModified
+ 2022-07-08 c1a4e0fe01 cmd/compile: fix libfuzzer instrumentation line number
+ 2022-07-08 5c1a13e7a4 cmd/go: avoid setting variables for '/' and ':' in TestScript subprocess environments
+ 2022-07-08 180bcad33d net/http: wait for listeners to exit in Server.Close and Shutdown
+ 2022-07-08 14abe8aa73 cmd/compile: don't convert to interface{} for un-comparable types in generic switch
+ 2022-07-07 1ebc983000 runtime: overestimate the amount of allocated memory in heapLive
+ 2022-07-07 c177d9d98a crypto/x509: restrict CRL number to <=20 octets
+ 2022-07-07 486fc01770 crypto/x509: correctly parse CRL entry extensions
+ 2022-07-07 8ac58de185 crypto/x509: populate Number and AKI of parsed CRLs
+ 2022-07-07 0c7fcf6bd1 cmd/link: explicitly disable PIE for windows/amd64 -race mode
+ 2022-07-07 eaf2125654 cmd/go: default to "exe" build mode for windows -race
+ 2022-07-06 1243ec9c17 cmd/compile: only check implicit dots for method call enabled by a type bound
+ 2022-07-06 c391156f96 cmd/go: set up git identity for build_buildvcs_auto.txt
+ 2022-07-06 2acd3646fc cmd/compile: rework induction variable detector
+ 2022-07-06 53a4152d47 os/exec: clarify that Wait must be called
+ 2022-07-06 177306f630 cmd/internal/notsha256: add purego tag as needed
+ 2022-07-06 f4755fc733 cmd/dist: use purego tag when building the bootstrap binaries
+ 2022-07-06 4484c30f78 misc/cgo/test: make TestSetgidStress cheaper
+ 2022-07-06 2007599dc8 test: recognize new gofrontend error message
+ 2022-07-05 d602380f58 cmd/compile: drop "buildcfg" from no instrument packages
+ 2022-07-05 c111091071 cmd/go: make module@nonexistentversion failures reusable
+ 2022-07-05 5f305ae8e5 cmd/go: add -reuse flag to make proxy invocations more efficient
+ 2022-07-05 84e091eef0 cmd/go: record origin metadata during module download
+ 2022-07-04 ceda93ed67 build/constraint: update doc to mention a feature added in Go 1.17
+ 2022-07-04 3cf79d9610 runtime: pass correct string to exits on Plan 9
+ 2022-07-01 e822b1e26e net/http: omit invalid header value from error message
+ 2022-07-01 4a2a3bca18 cmd/go, go/build: clarify build constraint docs
+ 2022-07-01 9a4d5357f4 flag: highlight support for double dashes in docs
+ 2022-07-01 c847a2c9f0 go/types, types2: document that exported predicates are unspecified for invalid type arguments
+ 2022-06-30 405c269b85 go/types, types2: re-enable a couple of commented out tests
+ 2022-06-30 aad9382e59 go/doc/comment: support links in lists in comments
+ 2022-06-30 af725f4286 os: fix a typo in path_windows.go

Change-Id: I381728322188aca0bfa81a946d6aedda8c07903c
This commit is contained in:
David Chase 2022-07-19 16:55:13 -04:00
commit de649a2a98
143 changed files with 3596 additions and 1032 deletions

View file

@ -1438,6 +1438,7 @@ Wei Fu <fhfuwei@163.com>
Wei Guangjing <vcc.163@gmail.com> Wei Guangjing <vcc.163@gmail.com>
Weichao Tang <tevic.tt@gmail.com> Weichao Tang <tevic.tt@gmail.com>
Weixie Cui <cuiweixie@gmail.com> <523516579@qq.com> Weixie Cui <cuiweixie@gmail.com> <523516579@qq.com>
Weizhi Yan <yanweizhi@bytedance.com>
Wembley G. Leach, Jr <wembley.gl@gmail.com> Wembley G. Leach, Jr <wembley.gl@gmail.com>
Wen Yang <yangwen.yw@gmail.com> Wen Yang <yangwen.yw@gmail.com>
Will Faught <will.faught@gmail.com> Will Faught <will.faught@gmail.com>

View file

@ -25,10 +25,12 @@
# Please keep the list sorted. # Please keep the list sorted.
Aamir Khan <syst3m.w0rm@gmail.com> Aamir Khan <syst3m.w0rm@gmail.com>
Aaqa Ishtyaq <aaqaishtyaq@gmail.com>
Aaron Beitch <aaronb@arista.com> Aaron Beitch <aaronb@arista.com>
Aaron Bieber <deftly@gmail.com> Aaron Bieber <deftly@gmail.com>
Aaron Cannon <cannona@fireantproductions.com> Aaron Cannon <cannona@fireantproductions.com>
Aaron France <aaron.l.france@gmail.com> Aaron France <aaron.l.france@gmail.com>
Aaron Gable <aaron@letsencrypt.org>
Aaron Jacobs <jacobsa@google.com> Aaron Jacobs <jacobsa@google.com>
Aaron Jensen <jensen.aaro@gmail.com> Aaron Jensen <jensen.aaro@gmail.com>
Aaron Kemp <kemp.aaron@gmail.com> Aaron Kemp <kemp.aaron@gmail.com>
@ -38,6 +40,7 @@ Aaron Stein <aaronstein12@gmail.com>
Aaron Torres <tcboox@gmail.com> Aaron Torres <tcboox@gmail.com>
Aaron Zinman <aaron@azinman.com> Aaron Zinman <aaron@azinman.com>
Aarti Parikh <aarti.parikh@gmail.com> Aarti Parikh <aarti.parikh@gmail.com>
Aayush Agrawal <aayushagrawal.1111@gmail.com>
Abdullah Al Maruf <mymail.maruf@gmail.com> Abdullah Al Maruf <mymail.maruf@gmail.com>
Abe Haskins <abeisgreat@abeisgreat.com> Abe Haskins <abeisgreat@abeisgreat.com>
Abhinav Gupta <abhinav.g90@gmail.com> Abhinav Gupta <abhinav.g90@gmail.com>
@ -49,6 +52,7 @@ Adam Kisala <adam.kisala@gmail.com>
Adam Langley <agl@golang.org> Adam Langley <agl@golang.org>
Adam Medzinski <adam.medzinski@gmail.com> Adam Medzinski <adam.medzinski@gmail.com>
Adam Mitha <adam.mitha@gmail.com> Adam Mitha <adam.mitha@gmail.com>
Adam Pritchard <pritchard.adam@gmail.com>
Adam Shannon <adamkshannon@gmail.com> Adam Shannon <adamkshannon@gmail.com>
Adam Shelton <aashelt90@gmail.com> Adam Shelton <aashelt90@gmail.com>
Adam Sindelar <adamsh@google.com> Adam Sindelar <adamsh@google.com>
@ -98,8 +102,10 @@ Alberto Donizetti <alb.donizetti@gmail.com>
Alberto García Hierro <alberto@garciahierro.com> <alberto.garcia.hierro@gmail.com> Alberto García Hierro <alberto@garciahierro.com> <alberto.garcia.hierro@gmail.com>
Alec Benzer <alec.benzer@gmail.com> Alec Benzer <alec.benzer@gmail.com>
Alejandro García Montoro <alejandro.garciamontoro@gmail.com> Alejandro García Montoro <alejandro.garciamontoro@gmail.com>
Alejandro Sáez <asm@redhat.com>
Aleksa Sarai <cyphar@cyphar.com> Aleksa Sarai <cyphar@cyphar.com>
Aleksandar Dezelin <dezelin@gmail.com> Aleksandar Dezelin <dezelin@gmail.com>
Aleksandr Dobkin <dobkin@google.com>
Aleksandr Lukinykh <a.lukinykh@xsolla.com> Aleksandr Lukinykh <a.lukinykh@xsolla.com>
Aleksandr Razumov <ar@cydev.ru> Aleksandr Razumov <ar@cydev.ru>
Alekseev Artem <a.artem060@gmail.com> Alekseev Artem <a.artem060@gmail.com>
@ -107,6 +113,7 @@ Aleksei Tirman <aleksei.tirman@jetbrains.com>
Alessandro Arzilli <alessandro.arzilli@gmail.com> Alessandro Arzilli <alessandro.arzilli@gmail.com>
Alessandro Baffa <alessandro.baffa@gmail.com> Alessandro Baffa <alessandro.baffa@gmail.com>
Alex A Skinner <alex@lx.lc> Alex A Skinner <alex@lx.lc>
Alex Brachet <abrachet@google.com>
Alex Brainman <alex.brainman@gmail.com> Alex Brainman <alex.brainman@gmail.com>
Alex Bramley <abramley@google.com> Alex Bramley <abramley@google.com>
Alex Browne <stephenalexbrowne@gmail.com> Alex Browne <stephenalexbrowne@gmail.com>
@ -165,6 +172,7 @@ Alexey Borzenkov <snaury@gmail.com>
Alexey Naidonov <alexey.naidyonov@gmail.com> Alexey Naidonov <alexey.naidyonov@gmail.com>
Alexey Neganov <neganovalexey@gmail.com> Alexey Neganov <neganovalexey@gmail.com>
Alexey Palazhchenko <alexey.palazhchenko@gmail.com> Alexey Palazhchenko <alexey.palazhchenko@gmail.com>
Alexey Perevalov <alexej.perevalov@gmail.com>
Alexey Semenyuk <alexsemenyuk88@gmail.com> Alexey Semenyuk <alexsemenyuk88@gmail.com>
Alexey Vilenskiy <bynovhack@gmail.com> Alexey Vilenskiy <bynovhack@gmail.com>
Alexis Hildebrandt <surryhill@gmail.com> Alexis Hildebrandt <surryhill@gmail.com>
@ -275,6 +283,7 @@ Anthony Alves <cvballa3g0@gmail.com>
Anthony Canino <anthony.canino1@gmail.com> Anthony Canino <anthony.canino1@gmail.com>
Anthony Eufemio <anthony.eufemio@gmail.com> Anthony Eufemio <anthony.eufemio@gmail.com>
Anthony Fok <foka@debian.org> Anthony Fok <foka@debian.org>
Anthony Hamon <antham@users.noreply.github.com>
Anthony Martin <ality@pbrane.org> Anthony Martin <ality@pbrane.org>
Anthony Sottile <asottile@umich.edu> Anthony Sottile <asottile@umich.edu>
Anthony Starks <ajstarks@gmail.com> Anthony Starks <ajstarks@gmail.com>
@ -335,6 +344,7 @@ Avi Flax <avi@timehop.com>
Aviv Klasquin Komissar <avivklas@gmail.com> Aviv Klasquin Komissar <avivklas@gmail.com>
awaw fumin <awawfumin@gmail.com> awaw fumin <awawfumin@gmail.com>
Awn Umar <awn@cryptolosophy.io> Awn Umar <awn@cryptolosophy.io>
Axel Busch <94176305+abuschIBM@users.noreply.github.com>
Axel Wagner <axel.wagner.hh@googlemail.com> Axel Wagner <axel.wagner.hh@googlemail.com>
Ayan George <ayan@ayan.net> Ayan George <ayan@ayan.net>
Ayanamist Yang <ayanamist@gmail.com> Ayanamist Yang <ayanamist@gmail.com>
@ -364,6 +374,7 @@ Ben Laurie <ben@links.org> <benl@google.com>
Ben Lubar <ben.lubar@gmail.com> Ben Lubar <ben.lubar@gmail.com>
Ben Lynn <benlynn@gmail.com> Ben Lynn <benlynn@gmail.com>
Ben Olive <sionide21@gmail.com> Ben Olive <sionide21@gmail.com>
Ben Sarah Golightly <golightly.ben@googlemail.com>
Ben Schwartz <bemasc@google.com> Ben Schwartz <bemasc@google.com>
Ben Shi <powerman1st@163.com> <ben.shi@streamcomputing.com> Ben Shi <powerman1st@163.com> <ben.shi@streamcomputing.com>
Ben Toews <mastahyeti@gmail.com> Ben Toews <mastahyeti@gmail.com>
@ -379,6 +390,7 @@ Benny Siegert <bsiegert@gmail.com>
Benoit Sigoure <tsunanet@gmail.com> Benoit Sigoure <tsunanet@gmail.com>
Berengar Lehr <Berengar.Lehr@gmx.de> Berengar Lehr <Berengar.Lehr@gmx.de>
Berkant Ipek <41230766+0xbkt@users.noreply.github.com> Berkant Ipek <41230766+0xbkt@users.noreply.github.com>
Bernhard Valenti <bernhard.valenti@gmail.com>
Beth Brown <ecbrown@google.com> Beth Brown <ecbrown@google.com>
Bharath Kumar Uppala <uppala.bharath@gmail.com> Bharath Kumar Uppala <uppala.bharath@gmail.com>
Bharath Thiruveedula <tbharath91@gmail.com> Bharath Thiruveedula <tbharath91@gmail.com>
@ -429,6 +441,7 @@ Brave Cow <rsr715@gmail.com>
Brayden Cloud <bcloud@google.com> Brayden Cloud <bcloud@google.com>
Brendan Daniel Tracey <tracey.brendan@gmail.com> Brendan Daniel Tracey <tracey.brendan@gmail.com>
Brendan O'Dea <bod@golang.org> Brendan O'Dea <bod@golang.org>
Breno Andrade <breno.andrade.dev@gmail.com>
Brett Cannon <bcannon@gmail.com> Brett Cannon <bcannon@gmail.com>
Brett Merrill <brett.j.merrill94@gmail.com> Brett Merrill <brett.j.merrill94@gmail.com>
Brian Dellisanti <briandellisanti@gmail.com> Brian Dellisanti <briandellisanti@gmail.com>
@ -498,6 +511,7 @@ Charles Lee <zombie.fml@gmail.com>
Charles Weill <weill@google.com> Charles Weill <weill@google.com>
Charlie Getzen <charlie@bolt.com> Charlie Getzen <charlie@bolt.com>
Charlie Moog <moogcharlie@gmail.com> Charlie Moog <moogcharlie@gmail.com>
Charlie Vieth <charlie.vieth@gmail.com>
Charlotte Brandhorst-Satzkorn <catzkorn@gmail.com> Charlotte Brandhorst-Satzkorn <catzkorn@gmail.com>
Chauncy Cullitan <chauncyc@google.com> Chauncy Cullitan <chauncyc@google.com>
Chen Zhidong <njutczd@gmail.com> Chen Zhidong <njutczd@gmail.com>
@ -540,6 +554,7 @@ Christian Himpel <chressie@googlemail.com> <chressie@gmail.com>
Christian Muehlhaeuser <muesli@gmail.com> Christian Muehlhaeuser <muesli@gmail.com>
Christian Pellegrin <chri@evolware.org> Christian Pellegrin <chri@evolware.org>
Christian R. Petrin <christianpetrin@gmail.com> Christian R. Petrin <christianpetrin@gmail.com>
Christian Stewart <christian@paral.in>
Christian Svensson <blue@cmd.nu> Christian Svensson <blue@cmd.nu>
Christine Hansmann <chhansmann@gmail.com> Christine Hansmann <chhansmann@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com> Christoffer Buchholz <christoffer.buchholz@gmail.com>
@ -580,7 +595,9 @@ Corne van der Plas <vdplas@gmail.com>
Cosmos Nicolaou <cnicolaou@google.com> Cosmos Nicolaou <cnicolaou@google.com>
Costin Chirvasuta <ctin@google.com> Costin Chirvasuta <ctin@google.com>
Craig Citro <craigcitro@google.com> Craig Citro <craigcitro@google.com>
Cristian Greco <sacrogemini@gmail.com>
Cristian Staretu <unclejacksons@gmail.com> Cristian Staretu <unclejacksons@gmail.com>
Cristiano Vitorino <57003922+cristianovitorino@users.noreply.github.com>
Cristo García <cgg.code@gmail.com> Cristo García <cgg.code@gmail.com>
cui fliter <imcusg@gmail.com> cui fliter <imcusg@gmail.com>
Cuihtlauac ALVARADO <cuihtlauac.alvarado@orange.com> Cuihtlauac ALVARADO <cuihtlauac.alvarado@orange.com>
@ -613,6 +630,7 @@ Daniel Cormier <danielc@knowbe4.com>
Daniël de Kok <me@danieldk.eu> Daniël de Kok <me@danieldk.eu>
Daniel Fleischman <danielfleischman@gmail.com> Daniel Fleischman <danielfleischman@gmail.com>
Daniel Ingram <ingramds@appstate.edu> Daniel Ingram <ingramds@appstate.edu>
Daniel Jakots <danieljakots@gmail.com>
Daniel Johansson <dajo2002@gmail.com> Daniel Johansson <dajo2002@gmail.com>
Daniel Kerwin <d.kerwin@gini.net> Daniel Kerwin <d.kerwin@gini.net>
Daniel Kessler <dkess@google.com> Daniel Kessler <dkess@google.com>
@ -688,6 +706,7 @@ David R. Jenni <david.r.jenni@gmail.com>
David Sansome <me@davidsansome.com> David Sansome <me@davidsansome.com>
David Stainton <dstainton415@gmail.com> David Stainton <dstainton415@gmail.com>
David Symonds <dsymonds@golang.org> David Symonds <dsymonds@golang.org>
David Taylor <tinystatemachine@gmail.com>
David Thomas <davidthomas426@gmail.com> David Thomas <davidthomas426@gmail.com>
David Timm <dtimm@pivotal.io> David Timm <dtimm@pivotal.io>
David Titarenco <david.titarenco@gmail.com> David Titarenco <david.titarenco@gmail.com>
@ -695,6 +714,7 @@ David Tolpin <david.tolpin@gmail.com>
David Url <david@urld.io> David Url <david@urld.io>
David Volquartz Lebech <david@lebech.info> David Volquartz Lebech <david@lebech.info>
David Wimmer <davidlwimmer@gmail.com> David Wimmer <davidlwimmer@gmail.com>
Davide Masserut <d.masserut@gmail.com>
Davies Liu <davies.liu@gmail.com> Davies Liu <davies.liu@gmail.com>
Davor Kapsa <davor.kapsa@gmail.com> Davor Kapsa <davor.kapsa@gmail.com>
Dean Eigenmann <7621705+decanus@users.noreply.github.com> Dean Eigenmann <7621705+decanus@users.noreply.github.com>
@ -762,6 +782,7 @@ Donovan Hide <donovanhide@gmail.com>
Doug Anderson <douga@google.com> Doug Anderson <douga@google.com>
Doug Fawley <dfawley@google.com> Doug Fawley <dfawley@google.com>
Douglas Danger Manley <doug.manley@gmail.com> Douglas Danger Manley <doug.manley@gmail.com>
Dragan Mladjenovic <Dragan.Mladjenovic@syrmia.com>
Drew Flower <drewvanstone@gmail.com> Drew Flower <drewvanstone@gmail.com>
Drew Hintz <adhintz@google.com> Drew Hintz <adhintz@google.com>
Drew Richardson <drewrichardson@gmail.com> Drew Richardson <drewrichardson@gmail.com>
@ -774,6 +795,7 @@ Dustin Sallings <dsallings@gmail.com>
Dustin Shields-Cloues <dcloues@gmail.com> Dustin Shields-Cloues <dcloues@gmail.com>
Dustin Spicuzza <dustin.spicuzza@gmail.com> Dustin Spicuzza <dustin.spicuzza@gmail.com>
Dvir Volk <dvir@everything.me> <dvirsky@gmail.com> Dvir Volk <dvir@everything.me> <dvirsky@gmail.com>
Dylan Le <dungtuanle@google.com>
Dylan Waits <dylan@waits.io> Dylan Waits <dylan@waits.io>
Ed Schouten <ed@nuxi.nl> Ed Schouten <ed@nuxi.nl>
Edan Bedrik <3d4nb3@gmail.com> Edan Bedrik <3d4nb3@gmail.com>
@ -785,6 +807,7 @@ Eduardo Villaseñor <evillasrmx@gmail.com>
Edward Muller <edwardam@interlix.com> Edward Muller <edwardam@interlix.com>
Egon Elbre <egonelbre@gmail.com> Egon Elbre <egonelbre@gmail.com>
Ehren Kret <ehren.kret@gmail.com> Ehren Kret <ehren.kret@gmail.com>
Eisuke Takahashi <eisuke.takahashi.home@gmail.com>
Eitan Adler <lists@eitanadler.com> Eitan Adler <lists@eitanadler.com>
Eivind Uggedal <eivind@uggedal.com> Eivind Uggedal <eivind@uggedal.com>
El Mostafa Idrassi <el.mostafa.idrassi@gmail.com> El Mostafa Idrassi <el.mostafa.idrassi@gmail.com>
@ -882,6 +905,7 @@ Fernandez Ludovic <lfernandez.dev@gmail.com>
Filip Gruszczyński <gruszczy@gmail.com> Filip Gruszczyński <gruszczy@gmail.com>
Filip Haglund <drathier@users.noreply.github.com> Filip Haglund <drathier@users.noreply.github.com>
Filip Stanis <fstanis@google.com> Filip Stanis <fstanis@google.com>
Filippo Rossi <filipporossi@hey.com>
Filippo Valsorda <filippo@golang.org> <filippo@cloudflare.com> <hi@filippo.io> Filippo Valsorda <filippo@golang.org> <filippo@cloudflare.com> <hi@filippo.io>
Firmansyah Adiputra <frm.adiputra@gmail.com> Firmansyah Adiputra <frm.adiputra@gmail.com>
Florian Forster <octo@google.com> Florian Forster <octo@google.com>
@ -965,19 +989,24 @@ GitHub User @ajnirp (1688456) <ajnirp@users.noreply.github.com>
GitHub User @ajz01 (4744634) <ajzdenek@gmail.com> GitHub User @ajz01 (4744634) <ajzdenek@gmail.com>
GitHub User @alkesh26 (1019076) <alkesh26@gmail.com> GitHub User @alkesh26 (1019076) <alkesh26@gmail.com>
GitHub User @andig (184815) <cpuidle@gmx.de> GitHub User @andig (184815) <cpuidle@gmx.de>
GitHub User @AndreasHGK (36563463) <andreaselbergs@outlook.com>
GitHub User @andrius4669 (4699695) <andrius4669@gmail.com> GitHub User @andrius4669 (4699695) <andrius4669@gmail.com>
GitHub User @ardnew (3837367) <andrew@ardnew.com>
GitHub User @ariathaker (51683211) <ariathaker@gmail.com>
GitHub User @as (8127015) <as.utf8@gmail.com> GitHub User @as (8127015) <as.utf8@gmail.com>
GitHub User @bakape (7851952) <bakape@gmail.com> GitHub User @bakape (7851952) <bakape@gmail.com>
GitHub User @bgadrian (830001) <aditza8@gmail.com> GitHub User @bgadrian (830001) <aditza8@gmail.com>
GitHub User @bontequero (2674999) <bontequero@gmail.com> GitHub User @bontequero (2674999) <bontequero@gmail.com>
GitHub User @cch123 (384546) <buaa.cch@gmail.com> GitHub User @cch123 (384546) <buaa.cch@gmail.com>
GitHub User @chainhelen (7046329) <chainhelen@gmail.com> GitHub User @chainhelen (7046329) <chainhelen@gmail.com>
GitHub User @champly (15027259) <champly1993@gmail.com>
GitHub User @chanxuehong (3416908) <chanxuehong@gmail.com> GitHub User @chanxuehong (3416908) <chanxuehong@gmail.com>
GitHub User @Cluas (10056928) <Cluas@live.cn> GitHub User @Cluas (10056928) <Cluas@live.cn>
GitHub User @cncal (23520240) <flycalvin@qq.com> GitHub User @cncal (23520240) <flycalvin@qq.com>
GitHub User @DQNEO (188741) <dqneoo@gmail.com> GitHub User @DQNEO (188741) <dqneoo@gmail.com>
GitHub User @Dreamacro (8615343) <chuainian@gmail.com> GitHub User @Dreamacro (8615343) <chuainian@gmail.com>
GitHub User @dupoxy (1143957) <dupoxy@users.noreply.github.com> GitHub User @dupoxy (1143957) <dupoxy@users.noreply.github.com>
GitHub User @eh-steve (16373174) <eh.steve.99@gmail.com>
GitHub User @EndlessCheng (7086966) <loli.con@qq.com> GitHub User @EndlessCheng (7086966) <loli.con@qq.com>
GitHub User @erifan (31343225) <eric.fang@arm.com> GitHub User @erifan (31343225) <eric.fang@arm.com>
GitHub User @esell (9735165) <eujon.sellers@gmail.com> GitHub User @esell (9735165) <eujon.sellers@gmail.com>
@ -987,6 +1016,7 @@ GitHub User @geedchin (11672310) <geedchin@gmail.com>
GitHub User @GrigoriyMikhalkin (3637857) <grigoriymikhalkin@gmail.com> GitHub User @GrigoriyMikhalkin (3637857) <grigoriymikhalkin@gmail.com>
GitHub User @Gusted (25481501) <williamzijl7@hotmail.com> GitHub User @Gusted (25481501) <williamzijl7@hotmail.com>
GitHub User @hengwu0 (41297446) <41297446+hengwu0@users.noreply.github.com> GitHub User @hengwu0 (41297446) <41297446+hengwu0@users.noreply.github.com>
GitHub User @hidu (613972) <duv123@gmail.com>
GitHub User @hitzhangjie (3725760) <hit.zhangjie@gmail.com> GitHub User @hitzhangjie (3725760) <hit.zhangjie@gmail.com>
GitHub User @hkhere (33268704) <33268704+hkhere@users.noreply.github.com> GitHub User @hkhere (33268704) <33268704+hkhere@users.noreply.github.com>
GitHub User @hqpko (13887251) <whaibin01@hotmail.com> GitHub User @hqpko (13887251) <whaibin01@hotmail.com>
@ -994,8 +1024,10 @@ GitHub User @Illirgway (5428603) <illirgway@gmail.com>
GitHub User @itchyny (375258) <itchyny@hatena.ne.jp> GitHub User @itchyny (375258) <itchyny@hatena.ne.jp>
GitHub User @jinmiaoluo (39730824) <jinmiaoluo@icloud.com> GitHub User @jinmiaoluo (39730824) <jinmiaoluo@icloud.com>
GitHub User @jopbrown (6345470) <msshane2008@gmail.com> GitHub User @jopbrown (6345470) <msshane2008@gmail.com>
GitHub User @Jorropo (24391983) <jorropo.pgm@gmail.com>
GitHub User @kazyshr (30496953) <kazyshr0301@gmail.com> GitHub User @kazyshr (30496953) <kazyshr0301@gmail.com>
GitHub User @kc1212 (1093806) <kc1212@users.noreply.github.com> GitHub User @kc1212 (1093806) <kc1212@users.noreply.github.com>
GitHub User @kkHAIKE (64772) <kkhaike@gmail.com>
GitHub User @komisan19 (18901496) <komiyama6219@gmail.com> GitHub User @komisan19 (18901496) <komiyama6219@gmail.com>
GitHub User @korzhao (64203902) <korzhao95@gmail.com> GitHub User @korzhao (64203902) <korzhao95@gmail.com>
GitHub User @Kropekk (13366453) <kamilkropiewnicki@gmail.com> GitHub User @Kropekk (13366453) <kamilkropiewnicki@gmail.com>
@ -1012,8 +1044,11 @@ GitHub User @markruler (38225900) <csu0414@gmail.com>
GitHub User @Matts966 (28551465) <Matts966@users.noreply.github.com> GitHub User @Matts966 (28551465) <Matts966@users.noreply.github.com>
GitHub User @micnncim (21333876) <micnncim@gmail.com> GitHub User @micnncim (21333876) <micnncim@gmail.com>
GitHub User @mkishere (224617) <224617+mkishere@users.noreply.github.com> GitHub User @mkishere (224617) <224617+mkishere@users.noreply.github.com>
GitHub User @mkontani (19817636) <itoama@live.jp>
GitHub User @mstmdev (5756338) <mstmdev@gmail.com>
GitHub User @nu50218 (40682920) <nu_ll@icloud.com> GitHub User @nu50218 (40682920) <nu_ll@icloud.com>
GitHub User @OlgaVlPetrova (44112727) <OVPpetrova@gmail.com> GitHub User @OlgaVlPetrova (44112727) <OVPpetrova@gmail.com>
GitHub User @penglei (1140325) <penglei@ybyte.org>
GitHub User @pierwill (19642016) <pierwill@users.noreply.github.com> GitHub User @pierwill (19642016) <pierwill@users.noreply.github.com>
GitHub User @pityonline (438222) <pityonline@gmail.com> GitHub User @pityonline (438222) <pityonline@gmail.com>
GitHub User @po3rin (29445112) <abctail30@gmail.com> GitHub User @po3rin (29445112) <abctail30@gmail.com>
@ -1027,6 +1062,7 @@ GitHub User @roudkerk (52280478) <roudkerk@google.com>
GitHub User @saitarunreddy (21041941) <saitarunreddypalla@gmail.com> GitHub User @saitarunreddy (21041941) <saitarunreddypalla@gmail.com>
GitHub User @SataQiu (9354727) <shidaqiu2018@gmail.com> GitHub User @SataQiu (9354727) <shidaqiu2018@gmail.com>
GitHub User @seifchen (23326132) <chenxuefeng1207@gmail.com> GitHub User @seifchen (23326132) <chenxuefeng1207@gmail.com>
GitHub User @sethbattin (1627760) <seth.battin@gmail.com>
GitHub User @shogo-ma (9860598) <Choroma194@gmail.com> GitHub User @shogo-ma (9860598) <Choroma194@gmail.com>
GitHub User @sivchari (55221074) <shibuuuu5@gmail.com> GitHub User @sivchari (55221074) <shibuuuu5@gmail.com>
GitHub User @skanehira (7888591) <sho19921005@gmail.com> GitHub User @skanehira (7888591) <sho19921005@gmail.com>
@ -1037,6 +1073,7 @@ GitHub User @tangxi666 (48145175) <tx1275044634@gmail.com>
GitHub User @tatsumack (4510569) <tatsu.mack@gmail.com> GitHub User @tatsumack (4510569) <tatsu.mack@gmail.com>
GitHub User @tell-k (26263) <ffk2005@gmail.com> GitHub User @tell-k (26263) <ffk2005@gmail.com>
GitHub User @tennashi (10219626) <tennashio@gmail.com> GitHub User @tennashi (10219626) <tennashio@gmail.com>
GitHub User @thepudds (20628140) <thepudds@users.noreply.github.com>
GitHub User @uhei (2116845) <uhei@users.noreply.github.com> GitHub User @uhei (2116845) <uhei@users.noreply.github.com>
GitHub User @uji (49834542) <ujiprog@gmail.com> GitHub User @uji (49834542) <ujiprog@gmail.com>
GitHub User @unbyte (5772358) <i@shangyes.net> GitHub User @unbyte (5772358) <i@shangyes.net>
@ -1048,6 +1085,7 @@ GitHub User @wolf1996 (5901874) <ksgiv37@gmail.com>
GitHub User @yah01 (12216890) <kagaminehuan@gmail.com> GitHub User @yah01 (12216890) <kagaminehuan@gmail.com>
GitHub User @yuanhh (1298735) <yuan415030@gmail.com> GitHub User @yuanhh (1298735) <yuan415030@gmail.com>
GitHub User @zikaeroh (48577114) <zikaeroh@gmail.com> GitHub User @zikaeroh (48577114) <zikaeroh@gmail.com>
GitHub User @zlasd (9432027) <zlasd@hotmail.com>
GitHub User @ZZMarquis (7624583) <zhonglingjian3821@163.com> GitHub User @ZZMarquis (7624583) <zhonglingjian3821@163.com>
Giulio Iotti <dullgiulio@gmail.com> Giulio Iotti <dullgiulio@gmail.com>
Giulio Micheloni <giulio.micheloni@gmail.com> Giulio Micheloni <giulio.micheloni@gmail.com>
@ -1067,6 +1105,7 @@ Greg Steuck <gnezdo+github@google.com>
Greg Thelen <gthelen@google.com> Greg Thelen <gthelen@google.com>
Greg Ward <greg@gerg.ca> Greg Ward <greg@gerg.ca>
Grégoire Delattre <gregoire.delattre@gmail.com> Grégoire Delattre <gregoire.delattre@gmail.com>
Grégoire Détrez <gregoire@fripost.org>
Gregory Man <man.gregory@gmail.com> Gregory Man <man.gregory@gmail.com>
Gregory Petrosyan <gregory.petrosyan@gmail.com> Gregory Petrosyan <gregory.petrosyan@gmail.com>
Guilherme Caruso <gui.martinscaruso@gmail.com> Guilherme Caruso <gui.martinscaruso@gmail.com>
@ -1078,6 +1117,7 @@ Guillaume Blaquiere <guillaume.blaquiere@gmail.com>
Guillaume J. Charmes <guillaume@charmes.net> Guillaume J. Charmes <guillaume@charmes.net>
Guillaume Sottas <guillaumesottas@gmail.com> Guillaume Sottas <guillaumesottas@gmail.com>
Günther Noack <gnoack@google.com> Günther Noack <gnoack@google.com>
Guo Hui <gh73962@gmail.com>
Guobiao Mei <meiguobiao@gmail.com> Guobiao Mei <meiguobiao@gmail.com>
Guodong Li <guodongli@google.com> Guodong Li <guodongli@google.com>
Guoliang Wang <iamwgliang@gmail.com> Guoliang Wang <iamwgliang@gmail.com>
@ -1128,6 +1168,7 @@ Herbert Georg Fischer <herbert.fischer@gmail.com>
Herbie Ong <herbie@google.com> Herbie Ong <herbie@google.com>
Heschi Kreinick <heschi@google.com> Heschi Kreinick <heschi@google.com>
Hidetatsu Yaginuma <ygnmhdtt@gmail.com> Hidetatsu Yaginuma <ygnmhdtt@gmail.com>
Hilário Coelho <hilario.coelho@securityside.com>
Hilko Bengen <bengen@hilluzination.de> Hilko Bengen <bengen@hilluzination.de>
Himanshu Kishna Srivastava <28himanshu@gmail.com> Himanshu Kishna Srivastava <28himanshu@gmail.com>
Hiroaki Nakamura <hnakamur@gmail.com> Hiroaki Nakamura <hnakamur@gmail.com>
@ -1173,8 +1214,10 @@ Igor Dolzhikov <bluesriverz@gmail.com>
Igor Vashyst <ivashyst@gmail.com> Igor Vashyst <ivashyst@gmail.com>
Igor Zhilianin <igor.zhilianin@gmail.com> Igor Zhilianin <igor.zhilianin@gmail.com>
Ikko Ashimine <eltociear@gmail.com> Ikko Ashimine <eltociear@gmail.com>
Ilia Choly <ilia.choly@gmail.com>
Illya Yalovyy <yalovoy@gmail.com> Illya Yalovyy <yalovoy@gmail.com>
Ilya Chukov <56119080+Elias506@users.noreply.github.com> Ilya Chukov <56119080+Elias506@users.noreply.github.com>
Ilya Leoshkevich <iii@linux.ibm.com>
Ilya Mateyko <me@astrophena.name> Ilya Mateyko <me@astrophena.name>
Ilya Sinelnikov <sidhmangh@gmail.com> Ilya Sinelnikov <sidhmangh@gmail.com>
Ilya Tocar <ilya.tocar@intel.com> Ilya Tocar <ilya.tocar@intel.com>
@ -1274,6 +1317,7 @@ Jani Monoses <jani.monoses@ubuntu.com> <jani.monoses@gmail.com>
Jannis Andrija Schnitzer <jannis@schnitzer.im> Jannis Andrija Schnitzer <jannis@schnitzer.im>
Jared Allard <jaredallard@users.noreply.github.com> Jared Allard <jaredallard@users.noreply.github.com>
Jared Culp <jculp14@gmail.com> Jared Culp <jculp14@gmail.com>
Jared Horvat <horvski@gmail.com>
Jaroslavas Počepko <jp@webmaster.ms> Jaroslavas Počepko <jp@webmaster.ms>
Jason A. Donenfeld <Jason@zx2c4.com> Jason A. Donenfeld <Jason@zx2c4.com>
Jason Baker <jason-baker@users.noreply.github.com> Jason Baker <jason-baker@users.noreply.github.com>
@ -1309,6 +1353,7 @@ Jeevanandam M <jeeva@myjeeva.com>
Jeff (Zhefu) Jiang <jeffjiang@google.com> Jeff (Zhefu) Jiang <jeffjiang@google.com>
Jeff Craig <jeffcraig@google.com> Jeff Craig <jeffcraig@google.com>
Jeff Dupont <jeff.dupont@gmail.com> Jeff Dupont <jeff.dupont@gmail.com>
Jeff Grafton <jgrafton@google.com>
Jeff Hodges <jeff@somethingsimilar.com> Jeff Hodges <jeff@somethingsimilar.com>
Jeff Johnson <jrjohnson@google.com> Jeff Johnson <jrjohnson@google.com>
Jeff R. Allen <jra@nella.org> <jeff.allen@gmail.com> Jeff R. Allen <jra@nella.org> <jeff.allen@gmail.com>
@ -1322,6 +1367,7 @@ Jens Frederich <jfrederich@gmail.com>
Jeremiah Harmsen <jeremiah@google.com> Jeremiah Harmsen <jeremiah@google.com>
Jeremy Banks <_@jeremy.ca> Jeremy Banks <_@jeremy.ca>
Jeremy Canady <jcanady@gmail.com> Jeremy Canady <jcanady@gmail.com>
Jeremy Chase <jeremy.chase@gmail.com>
Jeremy Faller <jeremy@golang.org> Jeremy Faller <jeremy@golang.org>
Jeremy Jackins <jeremyjackins@gmail.com> Jeremy Jackins <jeremyjackins@gmail.com>
Jeremy Jay <jeremy@pbnjay.com> Jeremy Jay <jeremy@pbnjay.com>
@ -1352,14 +1398,16 @@ Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Jin-wook Jeong <jeweljar@hanmail.net> Jin-wook Jeong <jeweljar@hanmail.net>
Jingcheng Zhang <diogin@gmail.com> Jingcheng Zhang <diogin@gmail.com>
Jingguo Yao <yaojingguo@gmail.com> Jingguo Yao <yaojingguo@gmail.com>
Jinzhu Zhang <wosmvp@gmail.com>
Jingnan Si <jingnan.si@gmail.com> Jingnan Si <jingnan.si@gmail.com>
Jinkun Zhang <franksnolf@gmail.com> Jinkun Zhang <franksnolf@gmail.com>
Jinwen Wo <wojinwen@huawei.com> Jinwen Wo <wojinwen@huawei.com>
Jinwook Jeong <vustthat@gmail.com>
Jinzhu Zhang <wosmvp@gmail.com>
Jiong Du <londevil@gmail.com> Jiong Du <londevil@gmail.com>
Jirka Daněk <dnk@mail.muni.cz> Jirka Daněk <dnk@mail.muni.cz>
Jiulong Wang <jiulongw@gmail.com> Jiulong Wang <jiulongw@gmail.com>
Joakim Sernbrant <serbaut@gmail.com> Joakim Sernbrant <serbaut@gmail.com>
João Penteado <4219131+joaopenteado@users.noreply.github.com>
Jochen Weber <jochen.weber80@gmail.com> Jochen Weber <jochen.weber80@gmail.com>
Joe Bowbeer <joe.bowbeer@gmail.com> Joe Bowbeer <joe.bowbeer@gmail.com>
Joe Cortopassi <joe@joecortopassi.com> Joe Cortopassi <joe@joecortopassi.com>
@ -1383,8 +1431,10 @@ Johan Euphrosine <proppy@google.com>
Johan Jansson <johan.jansson@iki.fi> Johan Jansson <johan.jansson@iki.fi>
Johan Knutzen <johan@senri.se> Johan Knutzen <johan@senri.se>
Johan Sageryd <j@1616.se> Johan Sageryd <j@1616.se>
Johan Van de Wauw <johan@gisky.be>
Johannes Altmanninger <aclopte@gmail.com> Johannes Altmanninger <aclopte@gmail.com>
Johannes Huning <johannes.huning@gmail.com> Johannes Huning <johannes.huning@gmail.com>
John Anthony <johnanthony.contrib@gmail.com>
John Asmuth <jasmuth@gmail.com> John Asmuth <jasmuth@gmail.com>
John Bampton <jbampton@gmail.com> John Bampton <jbampton@gmail.com>
John Beisley <huin@google.com> John Beisley <huin@google.com>
@ -1458,6 +1508,8 @@ Josh Deprez <josh.deprez@gmail.com>
Josh Goebel <dreamer3@gmail.com> Josh Goebel <dreamer3@gmail.com>
Josh Hoak <jhoak@google.com> Josh Hoak <jhoak@google.com>
Josh Holland <jrh@joshh.co.uk> Josh Holland <jrh@joshh.co.uk>
Josh Humphries <jh@fullstory.com>
Josh Powers <jpowers@influxdata.com>
Josh Rickmar <jrick@companyzero.com> Josh Rickmar <jrick@companyzero.com>
Josh Roppo <joshroppo@gmail.com> Josh Roppo <joshroppo@gmail.com>
Josh Varga <josh.varga@gmail.com> Josh Varga <josh.varga@gmail.com>
@ -1564,11 +1616,13 @@ Kevin Klues <klueska@gmail.com> <klueska@google.com>
Kevin Malachowski <chowski@google.com> Kevin Malachowski <chowski@google.com>
Kevin Parsons <kevpar@microsoft.com> Kevin Parsons <kevpar@microsoft.com>
Kevin Ruffin <kruffin@gmail.com> Kevin Ruffin <kruffin@gmail.com>
Kevin Smith <ksmith.nop@gmail.com>
Kevin Vu <kevin.m.vu@gmail.com> Kevin Vu <kevin.m.vu@gmail.com>
Kevin Zita <bleedgreenandgold@gmail.com> Kevin Zita <bleedgreenandgold@gmail.com>
Keyan Pishdadian <kpishdadian@gmail.com> Keyan Pishdadian <kpishdadian@gmail.com>
Keyuan Li <keyuanli123@gmail.com> Keyuan Li <keyuanli123@gmail.com>
Kezhu Wang <kezhuw@gmail.com> Kezhu Wang <kezhuw@gmail.com>
Khaled Yakdan <yakdan@code-intelligence.com>
Khosrow Moossavi <khos2ow@gmail.com> Khosrow Moossavi <khos2ow@gmail.com>
Kieran Colford <kieran@kcolford.com> Kieran Colford <kieran@kcolford.com>
Kieran Gorman <kieran.j.gorman@gmail.com> Kieran Gorman <kieran.j.gorman@gmail.com>
@ -1590,6 +1644,7 @@ Koki Tomoshige <tomocy.dev@gmail.com>
Komu Wairagu <komuw05@gmail.com> Komu Wairagu <komuw05@gmail.com>
Konstantin <konstantin8105@gmail.com> Konstantin <konstantin8105@gmail.com>
Konstantin Shaposhnikov <k.shaposhnikov@gmail.com> Konstantin Shaposhnikov <k.shaposhnikov@gmail.com>
Koumei Mikuni <komata392@gmail.com>
Koya IWAMURA <kiwamura0314@gmail.com> Koya IWAMURA <kiwamura0314@gmail.com>
Kris Kwiatkowski <kris@cloudflare.com> Kris Kwiatkowski <kris@cloudflare.com>
Kris Nova <kris@nivenly.com> Kris Nova <kris@nivenly.com>
@ -1625,6 +1680,7 @@ Lars Jeppesen <jeppesen.lars@gmail.com>
Lars Lehtonen <lars.lehtonen@gmail.com> Lars Lehtonen <lars.lehtonen@gmail.com>
Lars Wiegman <lars@namsral.com> Lars Wiegman <lars@namsral.com>
Larz Conwell <larzconwell@gmail.com> Larz Conwell <larzconwell@gmail.com>
Lasse Folger <lassefolger@google.com>
Laurent Voisin <lpvoisin@gmail.com> Laurent Voisin <lpvoisin@gmail.com>
Laurie Clark-Michalek <laurie@qubit.com> Laurie Clark-Michalek <laurie@qubit.com>
LE Manh Cuong <cuong.manhle.vn@gmail.com> LE Manh Cuong <cuong.manhle.vn@gmail.com>
@ -1656,6 +1712,7 @@ Lorenz Nickel <mail@lorenznickel.de>
Lorenzo Masini <rugginoso@develer.com> Lorenzo Masini <rugginoso@develer.com>
Lorenzo Stoakes <lstoakes@gmail.com> Lorenzo Stoakes <lstoakes@gmail.com>
Louis Kruger <louisk@google.com> Louis Kruger <louisk@google.com>
Louis Portay <louisportay@gmail.com>
Luan Santos <cfcluan@gmail.com> Luan Santos <cfcluan@gmail.com>
Lubomir I. Ivanov <neolit123@gmail.com> Lubomir I. Ivanov <neolit123@gmail.com>
Luca Bruno <luca.bruno@coreos.com> Luca Bruno <luca.bruno@coreos.com>
@ -1670,6 +1727,7 @@ Luigi Riefolo <luigi.riefolo@gmail.com>
Luit van Drongelen <luitvd@gmail.com> Luit van Drongelen <luitvd@gmail.com>
Luka Zakrajšek <tr00.g33k@gmail.com> Luka Zakrajšek <tr00.g33k@gmail.com>
Luka Zitnik <luka.zitnik@gmail.com> Luka Zitnik <luka.zitnik@gmail.com>
Lukas Joisten <luckuck.f95@gmail.com>
Lukasz Milewski <lmmilewski@gmail.com> Lukasz Milewski <lmmilewski@gmail.com>
Luke Champine <luke.champine@gmail.com> Luke Champine <luke.champine@gmail.com>
Luke Curley <qpingu@gmail.com> Luke Curley <qpingu@gmail.com>
@ -1688,6 +1746,7 @@ Magnus Hiie <magnus.hiie@gmail.com>
Mahdi Hosseini Moghaddam <seyed.mahdi.hosseini.moghaddam@ibm.com> Mahdi Hosseini Moghaddam <seyed.mahdi.hosseini.moghaddam@ibm.com>
Maia Lee <maia.lee@leftfieldlabs.com> Maia Lee <maia.lee@leftfieldlabs.com>
Maicon Costa <maiconscosta@gmail.com> Maicon Costa <maiconscosta@gmail.com>
Maisem Ali <maisem@tailscale.com>
Mak Kolybabi <mak@kolybabi.com> Mak Kolybabi <mak@kolybabi.com>
Maksym Trykur <maksym.trykur@gmail.com> Maksym Trykur <maksym.trykur@gmail.com>
Mal Curtis <mal@mal.co.nz> Mal Curtis <mal@mal.co.nz>
@ -1779,6 +1838,7 @@ Matheus Alcantara <matheusssilv97@gmail.com>
Mathias Beke <git@denbeke.be> Mathias Beke <git@denbeke.be>
Mathias Hall-Andersen <mathias@hall-andersen.dk> Mathias Hall-Andersen <mathias@hall-andersen.dk>
Mathias Leppich <mleppich@muhqu.de> Mathias Leppich <mleppich@muhqu.de>
Mathieu Aubin <mathieu@zeroserieux.com>
Mathieu Lonjaret <mathieu.lonjaret@gmail.com> Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
Mats Lidell <mats.lidell@cag.se> <mats.lidell@gmail.com> Mats Lidell <mats.lidell@cag.se> <mats.lidell@gmail.com>
Matt Aimonetti <mattaimonetti@gmail.com> Matt Aimonetti <mattaimonetti@gmail.com>
@ -1795,6 +1855,7 @@ Matt Juran <thepciet@gmail.com>
Matt Layher <mdlayher@gmail.com> <mdlayher@planetscale.com> Matt Layher <mdlayher@gmail.com> <mdlayher@planetscale.com>
Matt Masurka <masurka@google.com> Matt Masurka <masurka@google.com>
Matt Pearring <broskies@google.com> Matt Pearring <broskies@google.com>
Matt Prahl <mprahl@redhat.com>
Matt Reiferson <mreiferson@gmail.com> Matt Reiferson <mreiferson@gmail.com>
Matt Robenolt <matt@ydekproductions.com> Matt Robenolt <matt@ydekproductions.com>
Matt Strong <mstrong1341@gmail.com> Matt Strong <mstrong1341@gmail.com>
@ -1826,11 +1887,14 @@ Maxim Pimenov <mpimenov@google.com>
Maxim Pugachev <pugachev.mm@gmail.com> Maxim Pugachev <pugachev.mm@gmail.com>
Maxim Ushakov <ushakov@google.com> Maxim Ushakov <ushakov@google.com>
Maxime de Roucy <maxime.deroucy@gmail.com> Maxime de Roucy <maxime.deroucy@gmail.com>
Maxime Soulé <zeptomax@gmail.com>
Maxime Veber <nek.dev@gmail.com>
Máximo Cuadros Ortiz <mcuadros@gmail.com> Máximo Cuadros Ortiz <mcuadros@gmail.com>
Maxwell Krohn <themax@gmail.com> Maxwell Krohn <themax@gmail.com>
Maya Rashish <maya@NetBSD.org> Maya Rashish <maya@NetBSD.org>
Mayank Kumar <krmayankk@gmail.com> Mayank Kumar <krmayankk@gmail.com>
Mehrad Sadeghi <2012.linkinpark@gmail.com> Mehrad Sadeghi <2012.linkinpark@gmail.com>
Meidan Li <limeidan@loongson.cn>
Meir Fischer <meirfischer@gmail.com> Meir Fischer <meirfischer@gmail.com>
Meng Zhuo <mengzhuo1203@gmail.com> <mzh@golangcn.org> Meng Zhuo <mengzhuo1203@gmail.com> <mzh@golangcn.org>
Mhd Sulhan <m.shulhan@gmail.com> Mhd Sulhan <m.shulhan@gmail.com>
@ -1848,6 +1912,7 @@ Michael Ellis <micellis@justin.tv>
Michael Fraenkel <michael.fraenkel@gmail.com> Michael Fraenkel <michael.fraenkel@gmail.com>
Michael Fromberger <michael.j.fromberger@gmail.com> Michael Fromberger <michael.j.fromberger@gmail.com>
Michael Gehring <mg@ebfe.org> <gnirheg.leahcim@gmail.com> Michael Gehring <mg@ebfe.org> <gnirheg.leahcim@gmail.com>
Michael Gross <info@komika.org>
Michael Henderson <mdhender@users.noreply.github.com> Michael Henderson <mdhender@users.noreply.github.com>
Michael Hendricks <michael@ndrix.org> Michael Hendricks <michael@ndrix.org>
Michael Hoisie <hoisie@gmail.com> Michael Hoisie <hoisie@gmail.com>
@ -1909,6 +1974,7 @@ Mike Houston <mike@kothar.net>
Mike Kabischev <kabischev@gmail.com> Mike Kabischev <kabischev@gmail.com>
Mike Rosset <mike.rosset@gmail.com> Mike Rosset <mike.rosset@gmail.com>
Mike Samuel <mikesamuel@gmail.com> Mike Samuel <mikesamuel@gmail.com>
Mike Seplowitz <mseplowitz@bloomberg.net>
Mike Solomon <msolo@gmail.com> Mike Solomon <msolo@gmail.com>
Mike Strosaker <strosake@us.ibm.com> Mike Strosaker <strosake@us.ibm.com>
Mike Tsao <mike@sowbug.com> Mike Tsao <mike@sowbug.com>
@ -1939,9 +2005,11 @@ Monty Taylor <mordred@inaugust.com>
Moritz Fain <moritz@fain.io> Moritz Fain <moritz@fain.io>
Moriyoshi Koizumi <mozo@mozo.jp> Moriyoshi Koizumi <mozo@mozo.jp>
Morten Siebuhr <sbhr@sbhr.dk> Morten Siebuhr <sbhr@sbhr.dk>
Moshe Good <moshe@squareup.com>
Môshe van der Sterre <moshevds@gmail.com> Môshe van der Sterre <moshevds@gmail.com>
Mostafa Solati <mostafa.solati@gmail.com> Mostafa Solati <mostafa.solati@gmail.com>
Mostyn Bramley-Moore <mostyn@antipode.se> Mostyn Bramley-Moore <mostyn@antipode.se>
Motiejus Jakštys <motiejus@jakstys.lt>
Mrunal Patel <mrunalp@gmail.com> Mrunal Patel <mrunalp@gmail.com>
Muhammad Falak R Wani <falakreyaz@gmail.com> Muhammad Falak R Wani <falakreyaz@gmail.com>
Muhammad Hamza Farrukh <hamzafarrukh141@gmail.com> Muhammad Hamza Farrukh <hamzafarrukh141@gmail.com>
@ -2001,6 +2069,7 @@ Nick Robinson <nrobinson13@gmail.com>
Nick Sherron <nsherron90@gmail.com> Nick Sherron <nsherron90@gmail.com>
Nick Smolin <nick27surgut@gmail.com> Nick Smolin <nick27surgut@gmail.com>
Nicolas BRULEZ <n.brulez@gmail.com> Nicolas BRULEZ <n.brulez@gmail.com>
Nicolas Hillegeer <aktau@google.com>
Nicolas Kaiser <nikai@nikai.net> Nicolas Kaiser <nikai@nikai.net>
Nicolas Owens <mischief@offblast.org> Nicolas Owens <mischief@offblast.org>
Nicolas S. Dade <nic.dade@gmail.com> Nicolas S. Dade <nic.dade@gmail.com>
@ -2049,6 +2118,7 @@ Olivier Duperray <duperray.olivier@gmail.com>
Olivier Mengué <olivier.mengue@gmail.com> Olivier Mengué <olivier.mengue@gmail.com>
Olivier Poitrey <rs@dailymotion.com> Olivier Poitrey <rs@dailymotion.com>
Olivier Saingre <osaingre@gmail.com> Olivier Saingre <osaingre@gmail.com>
Olivier Szika <olivier.szika@vadesecure.com>
Olivier Wulveryck <olivier.wulveryck@gmail.com> Olivier Wulveryck <olivier.wulveryck@gmail.com>
Omar Jarjur <ojarjur@google.com> Omar Jarjur <ojarjur@google.com>
Onkar Jadhav <omjadhav2610@gmail.com> Onkar Jadhav <omjadhav2610@gmail.com>
@ -2069,6 +2139,7 @@ Panos Georgiadis <pgeorgiadis@suse.de>
Pantelis Sampaziotis <psampaz@gmail.com> Pantelis Sampaziotis <psampaz@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com> Paolo Giarrusso <p.giarrusso@gmail.com>
Paolo Martini <mrtnpaolo@gmail.com> Paolo Martini <mrtnpaolo@gmail.com>
Park Zhou <buildpaas@gmail.com>
Parker Moore <parkrmoore@gmail.com> Parker Moore <parkrmoore@gmail.com>
Parminder Singh <parmsingh101@gmail.com> Parminder Singh <parmsingh101@gmail.com>
Pascal Dierich <pascal@pascaldierich.com> Pascal Dierich <pascal@pascaldierich.com>
@ -2086,6 +2157,7 @@ Patrick Lee <pattyshack101@gmail.com>
Patrick Mézard <patrick@mezard.eu> Patrick Mézard <patrick@mezard.eu>
Patrick Mylund Nielsen <patrick@patrickmn.com> Patrick Mylund Nielsen <patrick@patrickmn.com>
Patrick Pelletier <pp.pelletier@gmail.com> Patrick Pelletier <pp.pelletier@gmail.com>
Patrick Pokatilo <mail@shyxormz.net>
Patrick Riley <pfr@google.com> Patrick Riley <pfr@google.com>
Patrick Smith <pat42smith@gmail.com> Patrick Smith <pat42smith@gmail.com>
Patrik Lundin <patrik@sigterm.se> Patrik Lundin <patrik@sigterm.se>
@ -2118,6 +2190,7 @@ Paul Wankadia <junyer@google.com>
Paulo Casaretto <pcasaretto@gmail.com> Paulo Casaretto <pcasaretto@gmail.com>
Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com> Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
Paulo Gomes <paulo.gomes.uk@gmail.com> Paulo Gomes <paulo.gomes.uk@gmail.com>
Pavel Gryaznov <grbitt@gmail.com>
Pavel Kositsyn <kositsyn.pa@phystech.edu> Pavel Kositsyn <kositsyn.pa@phystech.edu>
Pavel Paulau <pavel.paulau@gmail.com> Pavel Paulau <pavel.paulau@gmail.com>
Pavel Watson <watsonpavel@gmail.com> Pavel Watson <watsonpavel@gmail.com>
@ -2154,6 +2227,7 @@ Péter Szabó <pts@google.com>
Péter Szilágyi <peterke@gmail.com> Péter Szilágyi <peterke@gmail.com>
Peter Teichman <pteichman@fastly.com> Peter Teichman <pteichman@fastly.com>
Peter Tseng <ptseng@squareup.com> Peter Tseng <ptseng@squareup.com>
Peter Verraedt <peter@verraedt.be>
Peter Waldschmidt <peter@waldschmidt.com> Peter Waldschmidt <peter@waldschmidt.com>
Peter Waller <peter.waller@gmail.com> Peter Waller <peter.waller@gmail.com>
Peter Weinberger <pjw@golang.org> Peter Weinberger <pjw@golang.org>
@ -2162,6 +2236,8 @@ Peter Wu <pwu@cloudflare.com>
Peter Zhang <i@ddatsh.com> Peter Zhang <i@ddatsh.com>
Petr Jediný <petr.jediny@gmail.com> Petr Jediný <petr.jediny@gmail.com>
Petrica Voicu <pvoicu@paypal.com> Petrica Voicu <pvoicu@paypal.com>
Phil Bracikowski <pbracikowski@influxdata.com>
Phil Kulin <schors@gmail.com>
Phil Pearl <philip.j.r.pearl@gmail.com> Phil Pearl <philip.j.r.pearl@gmail.com>
Phil Pennock <pdp@golang.org> Phil Pennock <pdp@golang.org>
Philip Børgesen <philip.borgesen@gmail.com> Philip Børgesen <philip.borgesen@gmail.com>
@ -2171,6 +2247,7 @@ Philip K. Warren <pkwarren@gmail.com>
Philip Nelson <me@pnelson.ca> Philip Nelson <me@pnelson.ca>
Philipp Sauter <sauterp@protonmail.com> Philipp Sauter <sauterp@protonmail.com>
Philipp Stephani <phst@google.com> Philipp Stephani <phst@google.com>
Philippe Antoine <contact@catenacyber.fr>
Phillip Campbell <15082+phillc@users.noreply.github.com> Phillip Campbell <15082+phillc@users.noreply.github.com>
Pierre Carru <pierre.carru@eshard.com> Pierre Carru <pierre.carru@eshard.com>
Pierre Durand <pierredurand@gmail.com> Pierre Durand <pierredurand@gmail.com>
@ -2184,6 +2261,7 @@ Plekhanov Maxim <kishtatix@gmail.com>
Poh Zi How <poh.zihow@gmail.com> Poh Zi How <poh.zihow@gmail.com>
Polina Osadcha <polliosa@google.com> Polina Osadcha <polliosa@google.com>
Pontus Leitzler <leitzler@gmail.com> Pontus Leitzler <leitzler@gmail.com>
Pooja Shyamsundar <poojashyam@ibm.com>
Povilas Versockas <p.versockas@gmail.com> Povilas Versockas <p.versockas@gmail.com>
Prajwal Koirala <16564273+Prajwal-Koirala@users.noreply.github.com> Prajwal Koirala <16564273+Prajwal-Koirala@users.noreply.github.com>
Prasanga Siripala <pj@pjebs.com.au> Prasanga Siripala <pj@pjebs.com.au>
@ -2235,7 +2313,9 @@ Rebecca Stambler <rstambler@golang.org>
Reilly Watson <reillywatson@gmail.com> Reilly Watson <reillywatson@gmail.com>
Reinaldo de Souza Jr <juniorz@gmail.com> Reinaldo de Souza Jr <juniorz@gmail.com>
Remi Gillig <remigillig@gmail.com> Remi Gillig <remigillig@gmail.com>
Remy Chantenay <remy.chantenay@gmail.com>
Rémy Oudompheng <oudomphe@phare.normalesup.org> <remyoudompheng@gmail.com> Rémy Oudompheng <oudomphe@phare.normalesup.org> <remyoudompheng@gmail.com>
Ren Kanai <rk2904powr@gmail.com>
Ren Ogaki <re.yuz77777@gmail.com> Ren Ogaki <re.yuz77777@gmail.com>
Rens Rikkerink <Ikkerens@users.noreply.github.com> Rens Rikkerink <Ikkerens@users.noreply.github.com>
Rhys Hiltner <rhys@justin.tv> Rhys Hiltner <rhys@justin.tv>
@ -2301,8 +2381,10 @@ Romain Baugue <romain.baugue@elwinar.com>
Roman Budnikov <romanyx90@yandex.ru> Roman Budnikov <romanyx90@yandex.ru>
Roman Kollár <roman.kollar.0@gmail.com> Roman Kollár <roman.kollar.0@gmail.com>
Roman Shchekin <mrqtros@gmail.com> Roman Shchekin <mrqtros@gmail.com>
Romanos Skiadas <rom.skiad@gmail.com>
Ron Hashimoto <mail@h2so5.net> Ron Hashimoto <mail@h2so5.net>
Ron Minnich <rminnich@gmail.com> Ron Minnich <rminnich@gmail.com>
Ronaldo Lanhellas <ronaldo.lanhellas@gmail.com>
Ronnie Ebrin <ebrin.ronnie@protonmail.com> Ronnie Ebrin <ebrin.ronnie@protonmail.com>
Ross Chater <rdchater@gmail.com> Ross Chater <rdchater@gmail.com>
Ross Kinsey <rossikinsey@gmail.com> Ross Kinsey <rossikinsey@gmail.com>
@ -2341,6 +2423,7 @@ Sabin Mihai Rapan <sabin.rapan@gmail.com>
Sad Pencil <qh06@qq.com> Sad Pencil <qh06@qq.com>
Sai Cheemalapati <saicheems@google.com> Sai Cheemalapati <saicheems@google.com>
Sai Kiran Dasika <kirandasika30@gmail.com> Sai Kiran Dasika <kirandasika30@gmail.com>
Sai Sunder <saisunder92@gmail.com>
Sakeven Jiang <jc5930@sina.cn> Sakeven Jiang <jc5930@sina.cn>
Salaheddin M. Mahmud <salah.mahmud@gmail.com> Salaheddin M. Mahmud <salah.mahmud@gmail.com>
Salmān Aljammāz <s@0x65.net> Salmān Aljammāz <s@0x65.net>
@ -2392,6 +2475,7 @@ Sean Liao <seankhliao@gmail.com>
Sean Rees <sean@erifax.org> Sean Rees <sean@erifax.org>
Sebastiaan van Stijn <github@gone.nl> Sebastiaan van Stijn <github@gone.nl>
Sebastian Chlopecki <sebsebmc@gmail.com> Sebastian Chlopecki <sebsebmc@gmail.com>
Sebastian Gassner <sepastian@users.noreply.github.com>
Sebastian Kinne <skinne@google.com> Sebastian Kinne <skinne@google.com>
Sebastian Schmidt <yath@google.com> Sebastian Schmidt <yath@google.com>
Sebastien Binet <seb.binet@gmail.com> Sebastien Binet <seb.binet@gmail.com>
@ -2428,6 +2512,7 @@ Shamim Akhtar <shamim.rhce@gmail.com>
Shane Hansen <shanemhansen@gmail.com> Shane Hansen <shanemhansen@gmail.com>
Shang Jian Ding <sding3@ncsu.edu> Shang Jian Ding <sding3@ncsu.edu>
Shaozhen Ding <dsz0111@gmail.com> Shaozhen Ding <dsz0111@gmail.com>
Shapor Naghibzadeh <shapor@gmail.com>
Shaquille Que <shaquille@golang.org> Shaquille Que <shaquille@golang.org>
Shaquille Wyan Que <shaqqywyan@gmail.com> Shaquille Wyan Que <shaqqywyan@gmail.com>
Shaun Dunning <shaun.dunning@uservoice.com> Shaun Dunning <shaun.dunning@uservoice.com>
@ -2441,6 +2526,7 @@ Shengyu Zhang <shengyu.zhang@chaitin.com>
Shi Han Ng <shihanng@gmail.com> Shi Han Ng <shihanng@gmail.com>
ShihCheng Tu <mrtoastcheng@gmail.com> ShihCheng Tu <mrtoastcheng@gmail.com>
Shijie Hao <haormj@gmail.com> Shijie Hao <haormj@gmail.com>
Shiming Zhang <wzshiming@foxmail.com>
Shin Fan <shinfan@google.com> Shin Fan <shinfan@google.com>
Shinji Tanaka <shinji.tanaka@gmail.com> Shinji Tanaka <shinji.tanaka@gmail.com>
Shinnosuke Sawada <6warashi9@gmail.com> Shinnosuke Sawada <6warashi9@gmail.com>
@ -2455,6 +2541,7 @@ Shuai Tan <hopehook.com@gmail.com> <hopehook@qq.com>
Shubham Sharma <shubham.sha12@gmail.com> Shubham Sharma <shubham.sha12@gmail.com>
Shuhei Takahashi <nya@chromium.org> Shuhei Takahashi <nya@chromium.org>
Shun Fan <sfan@google.com> Shun Fan <sfan@google.com>
Silke Hofstra <silke@slxh.eu>
Silvan Jegen <s.jegen@gmail.com> Silvan Jegen <s.jegen@gmail.com>
Simão Gomes Viana <simaogmv@gmail.com> Simão Gomes Viana <simaogmv@gmail.com>
Simarpreet Singh <simar@linux.com> Simarpreet Singh <simar@linux.com>
@ -2470,6 +2557,7 @@ Simon Thulbourn <simon+github@thulbourn.com>
Simon Whitehead <chemnova@gmail.com> Simon Whitehead <chemnova@gmail.com>
Sina Siadat <siadat@gmail.com> Sina Siadat <siadat@gmail.com>
Sjoerd Siebinga <sjoerd.siebinga@gmail.com> Sjoerd Siebinga <sjoerd.siebinga@gmail.com>
Sofía Celi <cherenkovd69@gmail.com>
Sokolov Yura <funny.falcon@gmail.com> Sokolov Yura <funny.falcon@gmail.com>
Song Gao <song@gao.io> Song Gao <song@gao.io>
Song Lim <songlim327@gmail.com> Song Lim <songlim327@gmail.com>
@ -2498,6 +2586,7 @@ Stephan Klatt <stephan.klatt@gmail.com>
Stephan Renatus <srenatus@chef.io> Stephan Renatus <srenatus@chef.io>
Stephan Zuercher <zuercher@gmail.com> Stephan Zuercher <zuercher@gmail.com>
Stéphane Travostino <stephane.travostino@gmail.com> Stéphane Travostino <stephane.travostino@gmail.com>
Stephen Eckels <stevemk14ebr@gmail.com>
Stephen Lewis <stephen@sock.org.uk> Stephen Lewis <stephen@sock.org.uk>
Stephen Lu <steuhs@users.noreply.github.com> Stephen Lu <steuhs@users.noreply.github.com>
Stephen Ma <stephenm@golang.org> Stephen Ma <stephenm@golang.org>
@ -2517,6 +2606,7 @@ Steven Buss <sbuss@google.com>
Steven Elliot Harris <seharris@gmail.com> Steven Elliot Harris <seharris@gmail.com>
Steven Erenst <stevenerenst@gmail.com> Steven Erenst <stevenerenst@gmail.com>
Steven Hartland <steven.hartland@multiplay.co.uk> Steven Hartland <steven.hartland@multiplay.co.uk>
Steven Johnstone <steven.james.johnstone@gmail.com>
Steven Littiebrant <imgroxx@gmail.com> Steven Littiebrant <imgroxx@gmail.com>
Steven Maude <git@stevenmaude.co.uk> Steven Maude <git@stevenmaude.co.uk>
Steven Wilkin <stevenwilkin@gmail.com> Steven Wilkin <stevenwilkin@gmail.com>
@ -2561,6 +2651,7 @@ Tao Wang <twang2218@gmail.com>
Tarmigan Casebolt <tarmigan@gmail.com> Tarmigan Casebolt <tarmigan@gmail.com>
Taro Aoki <aizu.s1230022@gmail.com> Taro Aoki <aizu.s1230022@gmail.com>
Taru Karttunen <taruti@taruti.net> Taru Karttunen <taruti@taruti.net>
Tatiana Bradley <tatiana@golang.org>
Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com> Tatsuhiro Tsujikawa <tatsuhiro.t@gmail.com>
Tatsuya Kaneko <m.ddotx.f@gmail.com> Tatsuya Kaneko <m.ddotx.f@gmail.com>
Taufiq Rahman <taufiqrx8@gmail.com> Taufiq Rahman <taufiqrx8@gmail.com>
@ -2686,6 +2777,7 @@ Uriel Mangado <uriel@berlinblue.org>
Urvil Patel <patelurvil38@gmail.com> Urvil Patel <patelurvil38@gmail.com>
Utkarsh Dixit <53217283+utkarsh-extc@users.noreply.github.com> Utkarsh Dixit <53217283+utkarsh-extc@users.noreply.github.com>
Uttam C Pawar <uttam.c.pawar@intel.com> Uttam C Pawar <uttam.c.pawar@intel.com>
Uzondu Enudeme <uzondu@orijtech.com>
Vadim Grek <vadimprog@gmail.com> Vadim Grek <vadimprog@gmail.com>
Vadim Vygonets <unixdj@gmail.com> Vadim Vygonets <unixdj@gmail.com>
Val Polouchkine <vpolouch@justin.tv> Val Polouchkine <vpolouch@justin.tv>
@ -2723,7 +2815,7 @@ Vladimir Mihailenco <vladimir.webdev@gmail.com>
Vladimir Nikishenko <vova616@gmail.com> Vladimir Nikishenko <vova616@gmail.com>
Vladimir Stefanovic <vladimir.stefanovic@imgtec.com> Vladimir Stefanovic <vladimir.stefanovic@imgtec.com>
Vladimir Varankin <nek.narqo@gmail.com> Vladimir Varankin <nek.narqo@gmail.com>
Vojtech Bocek <vbocek@gmail.com> Vojtěch Boček <vojtech.bocek@avast.com> <vbocek@gmail.com>
Volker Dobler <dr.volker.dobler@gmail.com> Volker Dobler <dr.volker.dobler@gmail.com>
Volodymyr Paprotski <vpaprots@ca.ibm.com> Volodymyr Paprotski <vpaprots@ca.ibm.com>
Vyacheslav Pachkov <slava.pach@gmail.com> Vyacheslav Pachkov <slava.pach@gmail.com>
@ -2733,8 +2825,10 @@ Wagner Riffel <wgrriffel@gmail.com>
Walt Della <walt@javins.net> Walt Della <walt@javins.net>
Walter Poupore <wpoupore@google.com> Walter Poupore <wpoupore@google.com>
Wander Lairson Costa <wcosta@mozilla.com> Wander Lairson Costa <wcosta@mozilla.com>
Wang Deyu <wangdeyu.2021@bytedance.com>
Wang Xuerui <git@xen0n.name> Wang Xuerui <git@xen0n.name>
Warren Fernandes <warren.f.fernandes@gmail.com> Warren Fernandes <warren.f.fernandes@gmail.com>
Watson Ladd <watson@cloudflare.com>
Wayne Ashley Berry <wayneashleyberry@gmail.com> Wayne Ashley Berry <wayneashleyberry@gmail.com>
Wayne Zuo <wdvxdr1123@gmail.com> <wdvxdr@golangcn.org> Wayne Zuo <wdvxdr1123@gmail.com> <wdvxdr@golangcn.org>
Wedson Almeida Filho <wedsonaf@google.com> Wedson Almeida Filho <wedsonaf@google.com>
@ -2747,6 +2841,7 @@ Wei Xikai <xykwei@gmail.com>
Weichao Tang <tevic.tt@gmail.com> Weichao Tang <tevic.tt@gmail.com>
Weilu Jia <optix2000@gmail.com> Weilu Jia <optix2000@gmail.com>
Weixie Cui <cuiweixie@gmail.com> <523516579@qq.com> Weixie Cui <cuiweixie@gmail.com> <523516579@qq.com>
Weizhi Yan <yanweizhi@bytedance.com>
Wembley G. Leach, Jr <wembley.gl@gmail.com> Wembley G. Leach, Jr <wembley.gl@gmail.com>
Wen Yang <yangwen.yw@gmail.com> Wen Yang <yangwen.yw@gmail.com>
Wenlei (Frank) He <wlhe@google.com> Wenlei (Frank) He <wlhe@google.com>
@ -2756,6 +2851,7 @@ Wilfried Teiken <wteiken@google.com>
Will Beason <willbeason@gmail.com> Will Beason <willbeason@gmail.com>
Will Chan <willchan@google.com> Will Chan <willchan@google.com>
Will Faught <will.faught@gmail.com> Will Faught <will.faught@gmail.com>
Will Hawkins <whh8b@obs.cr>
Will Morrow <wmorrow.qdt@qualcommdatacenter.com> Will Morrow <wmorrow.qdt@qualcommdatacenter.com>
Will Norris <willnorris@google.com> Will Norris <willnorris@google.com>
Will Storey <will@summercat.com> Will Storey <will@summercat.com>
@ -2801,6 +2897,7 @@ Yestin Sun <ylh@pdx.edu>
Yesudeep Mangalapilly <yesudeep@google.com> Yesudeep Mangalapilly <yesudeep@google.com>
Yissakhar Z. Beck <yissakhar.beck@gmail.com> Yissakhar Z. Beck <yissakhar.beck@gmail.com>
Yo-An Lin <yoanlin93@gmail.com> Yo-An Lin <yoanlin93@gmail.com>
Yogesh Mangaj <yogesh.mangaj@gmail.com>
Yohei Takeda <yo.tak0812@gmail.com> Yohei Takeda <yo.tak0812@gmail.com>
Yongjian Xu <i3dmaster@gmail.com> Yongjian Xu <i3dmaster@gmail.com>
Yorman Arias <cixtords@gmail.com> Yorman Arias <cixtords@gmail.com>
@ -2829,6 +2926,7 @@ Yuval Pavel Zholkover <paulzhol@gmail.com>
Yves Junqueira <yvesj@google.com> <yves.junqueira@gmail.com> Yves Junqueira <yvesj@google.com> <yves.junqueira@gmail.com>
Zac Bergquist <zbergquist99@gmail.com> Zac Bergquist <zbergquist99@gmail.com>
Zach Bintliff <zbintliff@gmail.com> Zach Bintliff <zbintliff@gmail.com>
Zach Collier <zamicol@gmail.com>
Zach Gershman <zachgersh@gmail.com> Zach Gershman <zachgersh@gmail.com>
Zach Hoffman <zrhoffman@apache.org> Zach Hoffman <zrhoffman@apache.org>
Zach Jones <zachj1@gmail.com> Zach Jones <zachj1@gmail.com>
@ -2838,6 +2936,7 @@ Zachary Gershman <zgershman@pivotal.io>
Zaiyang Li <zaiyangli777@gmail.com> Zaiyang Li <zaiyangli777@gmail.com>
Zak <zrjknill@gmail.com> Zak <zrjknill@gmail.com>
Zakatell Kanda <hi@zkanda.io> Zakatell Kanda <hi@zkanda.io>
Zeke Lu <lvzecai@gmail.com>
Zellyn Hunter <zellyn@squareup.com> <zellyn@gmail.com> Zellyn Hunter <zellyn@squareup.com> <zellyn@gmail.com>
Zev Goldstein <zev.goldstein@gmail.com> Zev Goldstein <zev.goldstein@gmail.com>
Zhang Boyang <zhangboyang.id@gmail.com> Zhang Boyang <zhangboyang.id@gmail.com>
@ -2858,6 +2957,7 @@ Zvonimir Pavlinovic <zpavlinovic@google.com>
Zyad A. Ali <zyad.ali.me@gmail.com> Zyad A. Ali <zyad.ali.me@gmail.com>
Максадбек Ахмедов <a.maksadbek@gmail.com> Максадбек Ахмедов <a.maksadbek@gmail.com>
Максим Федосеев <max.faceless.frei@gmail.com> Максим Федосеев <max.faceless.frei@gmail.com>
Михаил Патин <mixa1243@gmail.com>
Роман Хавроненко <hagen1778@gmail.com> Роман Хавроненко <hagen1778@gmail.com>
Тарас Буник <tbunyk@gmail.com> Тарас Буник <tbunyk@gmail.com>
Фахриддин Балтаев <faxriddinjon@gmail.com> Фахриддин Балтаев <faxriddinjon@gmail.com>

View file

@ -118,6 +118,15 @@ as well as support for rendering them to HTML, Markdown, and text.
and <code>GOGCCFLAGS</code> variables it reports. and <code>GOGCCFLAGS</code> variables it reports.
</p> </p>
<p><!-- https://go.dev/issue/29666 -->
<code>go</code> <code>list</code> <code>-json</code> now accepts a
comma-separated list of JSON fields to populate. If a list is specified,
the JSON output will include only those fields, and
<code>go</code> <code>list</code> may avoid work to compute fields that are
not included. In some cases, this may suppress errors that would otherwise
be reported.
</p>
<p><!-- CL 410821 --> <p><!-- CL 410821 -->
The <code>go</code> command now caches information necessary to load some modules, The <code>go</code> command now caches information necessary to load some modules,
which should result in a speed-up of some <code>go</code> <code>list</code> invocations. which should result in a speed-up of some <code>go</code> <code>list</code> invocations.

View file

@ -20,7 +20,10 @@ import (
) )
func testSetgidStress(t *testing.T) { func testSetgidStress(t *testing.T) {
const N = 1000 var N = 1000
if testing.Short() {
N = 50
}
ch := make(chan int, N) ch := make(chan int, N)
for i := 0; i < N; i++ { for i := 0; i < N; i++ {
go func() { go func() {

View file

@ -205,6 +205,7 @@ func genHeader(t *testing.T, header, dir string) {
func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) { func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
t.Helper() t.Helper()
cmd := exec.Command(buildcmd[0], buildcmd[1:]...) cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
t.Log(buildcmd) t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil { if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out) t.Logf("%s", out)
@ -238,7 +239,7 @@ func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
binArgs := append(cmdToRun(exe), "arg1", "arg2") binArgs := append(cmdToRun(exe), "arg1", "arg2")
cmd = exec.Command(binArgs[0], binArgs[1:]...) cmd = exec.Command(binArgs[0], binArgs[1:]...)
if runtime.Compiler == "gccgo" { if runtime.Compiler == "gccgo" {
cmd.Env = append(os.Environ(), "GCCGO=1") cmd.Env = append(cmd.Environ(), "GCCGO=1")
} }
if out, err := cmd.CombinedOutput(); err != nil { if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out) t.Logf("%s", out)
@ -822,9 +823,15 @@ func TestPIE(t *testing.T) {
t.Skipf("skipping PIE test on %s", GOOS) t.Skipf("skipping PIE test on %s", GOOS)
} }
libgoa := "libgo.a"
if runtime.Compiler == "gccgo" {
libgoa = "liblibgo.a"
}
if !testWork { if !testWork {
defer func() { defer func() {
os.Remove("testp" + exeSuffix) os.Remove("testp" + exeSuffix)
os.Remove(libgoa)
os.RemoveAll(filepath.Join(GOPATH, "pkg")) os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}() }()
} }
@ -837,18 +844,13 @@ func TestPIE(t *testing.T) {
// be running this test in a GOROOT owned by root.) // be running this test in a GOROOT owned by root.)
genHeader(t, "p.h", "./p") genHeader(t, "p.h", "./p")
cmd := exec.Command("go", "install", "-buildmode=c-archive", "./libgo") cmd := exec.Command("go", "build", "-buildmode=c-archive", "./libgo")
if out, err := cmd.CombinedOutput(); err != nil { if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out) t.Logf("%s", out)
t.Fatal(err) t.Fatal(err)
} }
libgoa := "libgo.a" ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", libgoa)
if runtime.Compiler == "gccgo" {
libgoa = "liblibgo.a"
}
ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join(libgodir, libgoa))
if runtime.Compiler == "gccgo" { if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo") ccArgs = append(ccArgs, "-lgo")
} }
@ -1035,6 +1037,7 @@ func TestCachedInstall(t *testing.T) {
buildcmd := []string{"go", "install", "-buildmode=c-archive", "./libgo"} buildcmd := []string{"go", "install", "-buildmode=c-archive", "./libgo"}
cmd := exec.Command(buildcmd[0], buildcmd[1:]...) cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
cmd.Env = append(cmd.Environ(), "GO111MODULE=off") // 'go install' only works in GOPATH mode
t.Log(buildcmd) t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil { if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out) t.Logf("%s", out)
@ -1050,6 +1053,7 @@ func TestCachedInstall(t *testing.T) {
} }
cmd = exec.Command(buildcmd[0], buildcmd[1:]...) cmd = exec.Command(buildcmd[0], buildcmd[1:]...)
cmd.Env = append(cmd.Environ(), "GO111MODULE=off")
t.Log(buildcmd) t.Log(buildcmd)
if out, err := cmd.CombinedOutput(); err != nil { if out, err := cmd.CombinedOutput(); err != nil {
t.Logf("%s", out) t.Logf("%s", out)

View file

@ -151,16 +151,22 @@ func testMain(m *testing.M) int {
// The installation directory format varies depending on the platform. // The installation directory format varies depending on the platform.
output, err := exec.Command("go", "list", output, err := exec.Command("go", "list",
"-buildmode=c-shared", "-buildmode=c-shared",
"-installsuffix", "testcshared",
"-f", "{{.Target}}", "-f", "{{.Target}}",
"./libgo").CombinedOutput() "runtime/cgo").CombinedOutput()
if err != nil { if err != nil {
log.Panicf("go list failed: %v\n%s", err, output) log.Panicf("go list failed: %v\n%s", err, output)
} }
target := string(bytes.TrimSpace(output)) runtimeCgoTarget := string(bytes.TrimSpace(output))
libgoname = filepath.Base(target) libSuffix = strings.TrimPrefix(filepath.Ext(runtimeCgoTarget), ".")
installdir = filepath.Dir(target)
libSuffix = strings.TrimPrefix(filepath.Ext(target), ".") defer func() {
if installdir != "" {
err := os.RemoveAll(installdir)
if err != nil {
log.Panic(err)
}
}
}()
return m.Run() return m.Run()
} }
@ -284,8 +290,13 @@ func createHeaders() error {
} }
// Generate a C header file for libgo itself. // Generate a C header file for libgo itself.
args = []string{"go", "install", "-buildmode=c-shared", installdir, err = os.MkdirTemp("", "testcshared")
"-installsuffix", "testcshared", "./libgo"} if err != nil {
return err
}
libgoname = "libgo." + libSuffix
args = []string{"go", "build", "-buildmode=c-shared", "-o", filepath.Join(installdir, libgoname), "./libgo"}
cmd = exec.Command(args[0], args[1:]...) cmd = exec.Command(args[0], args[1:]...)
out, err = cmd.CombinedOutput() out, err = cmd.CombinedOutput()
if err != nil { if err != nil {
@ -373,6 +384,7 @@ func createHeadersOnce(t *testing.T) {
headersErr = createHeaders() headersErr = createHeaders()
}) })
if headersErr != nil { if headersErr != nil {
t.Helper()
t.Fatal(headersErr) t.Fatal(headersErr)
} }
} }
@ -705,12 +717,15 @@ func TestCachedInstall(t *testing.T) {
copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "libgo", "libgo.go"), filepath.Join("libgo", "libgo.go")) copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "libgo", "libgo.go"), filepath.Join("libgo", "libgo.go"))
copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "p", "p.go"), filepath.Join("p", "p.go")) copyFile(t, filepath.Join(tmpdir, "src", "testcshared", "p", "p.go"), filepath.Join("p", "p.go"))
env := append(os.Environ(), "GOPATH="+tmpdir, "GOBIN="+filepath.Join(tmpdir, "bin"))
buildcmd := []string{"go", "install", "-x", "-buildmode=c-shared", "-installsuffix", "testcshared", "./libgo"} buildcmd := []string{"go", "install", "-x", "-buildmode=c-shared", "-installsuffix", "testcshared", "./libgo"}
cmd := exec.Command(buildcmd[0], buildcmd[1:]...) cmd := exec.Command(buildcmd[0], buildcmd[1:]...)
cmd.Dir = filepath.Join(tmpdir, "src", "testcshared") cmd.Dir = filepath.Join(tmpdir, "src", "testcshared")
env := append(cmd.Environ(),
"GOPATH="+tmpdir,
"GOBIN="+filepath.Join(tmpdir, "bin"),
"GO111MODULE=off", // 'go install' only works in GOPATH mode
)
cmd.Env = env cmd.Env = env
t.Log(buildcmd) t.Log(buildcmd)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()

View file

@ -108,6 +108,15 @@ func testMain(m *testing.M) (int, error) {
defer os.RemoveAll(workDir) defer os.RemoveAll(workDir)
} }
// -buildmode=shared fundamentally does not work in module mode.
// (It tries to share package dependencies across builds, but in module mode
// each module has its own distinct set of dependency versions.)
// We would like to eliminate it (see https://go.dev/issue/47788),
// but first need to figure out a replacement that covers the small subset
// of use-cases where -buildmode=shared still works today.
// For now, run the tests in GOPATH mode only.
os.Setenv("GO111MODULE", "off")
// Some tests need to edit the source in GOPATH, so copy this directory to a // Some tests need to edit the source in GOPATH, so copy this directory to a
// temporary directory and chdir to that. // temporary directory and chdir to that.
gopath := filepath.Join(workDir, "gopath") gopath := filepath.Join(workDir, "gopath")

View file

@ -68,9 +68,6 @@ Flags:
-importcfg file -importcfg file
Read import configuration from file. Read import configuration from file.
In the file, set importmap, packagefile to specify import resolution. In the file, set importmap, packagefile to specify import resolution.
-importmap old=new
Interpret import "old" as import "new" during compilation.
The option may be repeated to add multiple mappings.
-installsuffix suffix -installsuffix suffix
Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix
instead of $GOROOT/pkg/$GOOS_$GOARCH. instead of $GOROOT/pkg/$GOOS_$GOARCH.

View file

@ -70,7 +70,6 @@ var NoInstrumentPkgs = []string{
"runtime/msan", "runtime/msan",
"runtime/asan", "runtime/asan",
"internal/cpu", "internal/cpu",
"buildcfg",
} }
// Don't insert racefuncenter/racefuncexit into the following packages. // Don't insert racefuncenter/racefuncexit into the following packages.

View file

@ -100,7 +100,6 @@ type CmdFlags struct {
GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
GoVersion string "help:\"required version of the runtime\"" GoVersion string "help:\"required version of the runtime\""
ImportCfg func(string) "help:\"read import configuration from `file`\"" ImportCfg func(string) "help:\"read import configuration from `file`\""
ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
InstallSuffix string "help:\"set pkg directory `suffix`\"" InstallSuffix string "help:\"set pkg directory `suffix`\""
JSON string "help:\"version,file for JSON compiler/optimizer detail output\"" JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
Lang string "help:\"Go language version source code expects\"" Lang string "help:\"Go language version source code expects\""
@ -130,7 +129,7 @@ type CmdFlags struct {
Files map[string]string Files map[string]string
} }
ImportDirs []string // appended to by -I ImportDirs []string // appended to by -I
ImportMap map[string]string // set by -importmap OR -importcfg ImportMap map[string]string // set by -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as // Whether we are adding any sort of code instrumentation, such as
@ -156,7 +155,6 @@ func ParseFlags() {
Flag.EmbedCfg = readEmbedCfg Flag.EmbedCfg = readEmbedCfg
Flag.GenDwarfInl = 2 Flag.GenDwarfInl = 2
Flag.ImportCfg = readImportCfg Flag.ImportCfg = readImportCfg
Flag.ImportMap = addImportMap
Flag.LinkShared = &Ctxt.Flag_linkshared Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true Flag.WB = true
@ -389,21 +387,6 @@ func addImportDir(dir string) {
} }
} }
func addImportMap(s string) {
if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string)
}
if strings.Count(s, "=") != 1 {
log.Fatal("-importmap argument must be of the form source=actual")
}
i := strings.Index(s, "=")
source, actual := s[:i], s[i+1:]
if source == "" || actual == "" {
log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
}
Flag.Cfg.ImportMap[source] = actual
}
func readImportCfg(file string) { func readImportCfg(file string) {
if Flag.Cfg.ImportMap == nil { if Flag.Cfg.ImportMap == nil {
Flag.Cfg.ImportMap = make(map[string]string) Flag.Cfg.ImportMap = make(map[string]string)

View file

@ -3,6 +3,7 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:generate go run -mod=mod mknode.go //go:generate go run -mod=mod mknode.go
// Note: see comment at top of mknode.go
package ir package ir

View file

@ -5,6 +5,12 @@
//go:build ignore //go:build ignore
// +build ignore // +build ignore
// Note: this program must be run with the GOROOT
// environment variable set to the root of this tree.
// GOROOT=...
// cd $GOROOT/src/cmd/compile/internal/ir
// ../../../../../bin/go run -mod=mod mknode.go
package main package main
import ( import (
@ -154,6 +160,9 @@ func forNodeFields(named *types.Named, prologue, singleTmpl, sliceTmpl, epilogue
} }
tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg)) tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg))
if what == "go/constant.Value" {
return false
}
if implementsNode(typ) { if implementsNode(typ) {
if slice != nil { if slice != nil {
helper := strings.TrimPrefix(what, "*") + "s" helper := strings.TrimPrefix(what, "*") + "s"

View file

@ -1214,6 +1214,9 @@ func (subst *subster) node(n ir.Node) ir.Node {
if m.Tag != nil && m.Tag.Op() == ir.OTYPESW { if m.Tag != nil && m.Tag.Op() == ir.OTYPESW {
break // Nothing to do here for type switches. break // Nothing to do here for type switches.
} }
if m.Tag != nil && !types.IsComparable(m.Tag.Type()) {
break // Nothing to do here for un-comparable types.
}
if m.Tag != nil && !m.Tag.Type().IsEmptyInterface() && m.Tag.Type().HasShape() { if m.Tag != nil && !m.Tag.Type().IsEmptyInterface() && m.Tag.Type().HasShape() {
// To implement a switch on a value that is or has a type parameter, we first convert // To implement a switch on a value that is or has a type parameter, we first convert
// that thing we're switching on to an interface{}. // that thing we're switching on to an interface{}.
@ -1354,7 +1357,7 @@ func (g *genInst) dictPass(info *instInfo) {
} }
case ir.ODOTTYPE, ir.ODOTTYPE2: case ir.ODOTTYPE, ir.ODOTTYPE2:
dt := m.(*ir.TypeAssertExpr) dt := m.(*ir.TypeAssertExpr)
if !dt.Type().HasShape() && !dt.X.Type().HasShape() { if !dt.Type().HasShape() && !(dt.X.Type().HasShape() && !dt.X.Type().IsEmptyInterface()) {
break break
} }
var rtype, itab ir.Node var rtype, itab ir.Node
@ -1654,12 +1657,14 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
se := call.X.(*ir.SelectorExpr) se := call.X.(*ir.SelectorExpr)
if se.X.Type().IsShape() { if se.X.Type().IsShape() {
// This is a method call enabled by a type bound. // This is a method call enabled by a type bound.
tparam := se.X.Type()
// We need this extra check for method expressions, if call.X.Op() == ir.ODOTMETH {
// which don't add in the implicit XDOTs. // We need this extra check for method expressions,
tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel) // which don't add in the implicit XDOTs.
tmpse = typecheck.AddImplicitDots(tmpse) tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
tparam := tmpse.X.Type() tmpse = typecheck.AddImplicitDots(tmpse)
tparam = tmpse.X.Type()
}
if !tparam.IsShape() { if !tparam.IsShape() {
// The method expression is not // The method expression is not
// really on a typeparam. // really on a typeparam.

View file

@ -5,6 +5,7 @@
package ssa package ssa
import ( import (
"cmd/compile/internal/base"
"fmt" "fmt"
"math" "math"
) )
@ -90,41 +91,42 @@ func findIndVar(f *Func) []indVar {
continue continue
} }
var flags indVarFlags var ind *Value // induction variable
var ind, max *Value // induction, and maximum var init *Value // starting value
var limit *Value // ending value
// Check thet the control if it either ind </<= max or max >/>= ind. // Check thet the control if it either ind </<= limit or limit </<= ind.
// TODO: Handle 32-bit comparisons. // TODO: Handle 32-bit comparisons.
// TODO: Handle unsigned comparisons? // TODO: Handle unsigned comparisons?
c := b.Controls[0] c := b.Controls[0]
inclusive := false
switch c.Op { switch c.Op {
case OpLeq64: case OpLeq64:
flags |= indVarMaxInc inclusive = true
fallthrough fallthrough
case OpLess64: case OpLess64:
ind, max = c.Args[0], c.Args[1] ind, limit = c.Args[0], c.Args[1]
default: default:
continue continue
} }
// See if this is really an induction variable // See if this is really an induction variable
less := true less := true
min, inc, nxt := parseIndVar(ind) init, inc, nxt := parseIndVar(ind)
if min == nil { if init == nil {
// We failed to parse the induction variable. Before punting, we want to check // We failed to parse the induction variable. Before punting, we want to check
// whether the control op was written with arguments in non-idiomatic order, // whether the control op was written with the induction variable on the RHS
// so that we believe being "max" (the upper bound) is actually the induction // instead of the LHS. This happens for the downwards case, like:
// variable itself. This would happen for code like: // for i := len(n)-1; i >= 0; i--
// for i := 0; len(n) > i; i++ init, inc, nxt = parseIndVar(limit)
min, inc, nxt = parseIndVar(max) if init == nil {
if min == nil {
// No recognied induction variable on either operand // No recognied induction variable on either operand
continue continue
} }
// Ok, the arguments were reversed. Swap them, and remember that we're // Ok, the arguments were reversed. Swap them, and remember that we're
// looking at a ind >/>= loop (so the induction must be decrementing). // looking at a ind >/>= loop (so the induction must be decrementing).
ind, max = max, ind ind, limit = limit, ind
less = false less = false
} }
@ -138,8 +140,8 @@ func findIndVar(f *Func) []indVar {
} }
// Increment sign must match comparison direction. // Increment sign must match comparison direction.
// When incrementing, the termination comparison must be ind </<= max. // When incrementing, the termination comparison must be ind </<= limit.
// When decrementing, the termination comparison must be ind >/>= max. // When decrementing, the termination comparison must be ind >/>= limit.
// See issue 26116. // See issue 26116.
if step > 0 && !less { if step > 0 && !less {
continue continue
@ -148,177 +150,229 @@ func findIndVar(f *Func) []indVar {
continue continue
} }
// If the increment is negative, swap min/max and their flags
if step < 0 {
min, max = max, min
oldf := flags
flags = indVarMaxInc
if oldf&indVarMaxInc == 0 {
flags |= indVarMinExc
}
step = -step
}
if flags&indVarMaxInc != 0 && max.Op == OpConst64 && max.AuxInt+step < max.AuxInt {
// For a <= comparison, we need to make sure that a value equal to
// max can be incremented without overflowing.
// (For a < comparison, the %step check below ensures no overflow.)
continue
}
// Up to now we extracted the induction variable (ind), // Up to now we extracted the induction variable (ind),
// the increment delta (inc), the temporary sum (nxt), // the increment delta (inc), the temporary sum (nxt),
// the minimum value (min) and the maximum value (max). // the initial value (init) and the limiting value (limit).
// //
// We also know that ind has the form (Phi min nxt) where // We also know that ind has the form (Phi init nxt) where
// nxt is (Add inc nxt) which means: 1) inc dominates nxt // nxt is (Add inc nxt) which means: 1) inc dominates nxt
// and 2) there is a loop starting at inc and containing nxt. // and 2) there is a loop starting at inc and containing nxt.
// //
// We need to prove that the induction variable is incremented // We need to prove that the induction variable is incremented
// only when it's smaller than the maximum value. // only when it's smaller than the limiting value.
// Two conditions must happen listed below to accept ind // Two conditions must happen listed below to accept ind
// as an induction variable. // as an induction variable.
// First condition: loop entry has a single predecessor, which // First condition: loop entry has a single predecessor, which
// is the header block. This implies that b.Succs[0] is // is the header block. This implies that b.Succs[0] is
// reached iff ind < max. // reached iff ind < limit.
if len(b.Succs[0].b.Preds) != 1 { if len(b.Succs[0].b.Preds) != 1 {
// b.Succs[1] must exit the loop. // b.Succs[1] must exit the loop.
continue continue
} }
// Second condition: b.Succs[0] dominates nxt so that // Second condition: b.Succs[0] dominates nxt so that
// nxt is computed when inc < max, meaning nxt <= max. // nxt is computed when inc < limit.
if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) { if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) {
// inc+ind can only be reached through the branch that enters the loop. // inc+ind can only be reached through the branch that enters the loop.
continue continue
} }
// We can only guarantee that the loop runs within limits of induction variable // Check for overflow/underflow. We need to make sure that inc never causes
// if (one of) // the induction variable to wrap around.
// (1) the increment is ±1 // We use a function wrapper here for easy return true / return false / keep going logic.
// (2) the limits are constants // This function returns true if the increment will never overflow/underflow.
// (3) loop is of the form k0 upto Known_not_negative-k inclusive, step <= k ok := func() bool {
// (4) loop is of the form k0 upto Known_not_negative-k exclusive, step <= k+1 if step > 0 {
// (5) loop is of the form Known_not_negative downto k0, minint+step < k0 if limit.Op == OpConst64 {
if step > 1 { // Figure out the actual largest value.
ok := false v := limit.AuxInt
if min.Op == OpConst64 && max.Op == OpConst64 { if !inclusive {
if max.AuxInt > min.AuxInt && max.AuxInt%step == min.AuxInt%step { // handle overflow if v == math.MinInt64 {
ok = true return false // < minint is never satisfiable.
}
}
// Handle induction variables of these forms.
// KNN is known-not-negative.
// SIGNED ARITHMETIC ONLY. (see switch on c above)
// Possibilities for KNN are len and cap; perhaps we can infer others.
// for i := 0; i <= KNN-k ; i += k
// for i := 0; i < KNN-(k-1); i += k
// Also handle decreasing.
// "Proof" copied from https://go-review.googlesource.com/c/go/+/104041/10/src/cmd/compile/internal/ssa/loopbce.go#164
//
// In the case of
// // PC is Positive Constant
// L := len(A)-PC
// for i := 0; i < L; i = i+PC
//
// we know:
//
// 0 + PC does not over/underflow.
// len(A)-PC does not over/underflow
// maximum value for L is MaxInt-PC
// i < L <= MaxInt-PC means i + PC < MaxInt hence no overflow.
// To match in SSA:
// if (a) min.Op == OpConst64(k0)
// and (b) k0 >= MININT + step
// and (c) max.Op == OpSubtract(Op{StringLen,SliceLen,SliceCap}, k)
// or (c) max.Op == OpAdd(Op{StringLen,SliceLen,SliceCap}, -k)
// or (c) max.Op == Op{StringLen,SliceLen,SliceCap}
// and (d) if upto loop, require indVarMaxInc && step <= k or !indVarMaxInc && step-1 <= k
if min.Op == OpConst64 && min.AuxInt >= step+math.MinInt64 {
knn := max
k := int64(0)
var kArg *Value
switch max.Op {
case OpSub64:
knn = max.Args[0]
kArg = max.Args[1]
case OpAdd64:
knn = max.Args[0]
kArg = max.Args[1]
if knn.Op == OpConst64 {
knn, kArg = kArg, knn
}
}
switch knn.Op {
case OpSliceLen, OpStringLen, OpSliceCap:
default:
knn = nil
}
if kArg != nil && kArg.Op == OpConst64 {
k = kArg.AuxInt
if max.Op == OpAdd64 {
k = -k
}
}
if k >= 0 && knn != nil {
if inc.AuxInt > 0 { // increasing iteration
// The concern for the relation between step and k is to ensure that iv never exceeds knn
// i.e., iv < knn-(K-1) ==> iv + K <= knn; iv <= knn-K ==> iv +K < knn
if step <= k || flags&indVarMaxInc == 0 && step-1 == k {
ok = true
} }
} else { // decreasing iteration v--
// Will be decrementing from max towards min; max is knn-k; will only attempt decrement if
// knn-k >[=] min; underflow is only a concern if min-step is not smaller than min.
// This all assumes signed integer arithmetic
// This is already assured by the test above: min.AuxInt >= step+math.MinInt64
ok = true
} }
if init.Op == OpConst64 {
// Use stride to compute a better lower limit.
if init.AuxInt > v {
return false
}
v = addU(init.AuxInt, diff(v, init.AuxInt)/uint64(step)*uint64(step))
}
// It is ok if we can't overflow when incrementing from the largest value.
return !addWillOverflow(v, step)
}
if step == 1 && !inclusive {
// Can't overflow because maxint is never a possible value.
return true
}
// If the limit is not a constant, check to see if it is a
// negative offset from a known non-negative value.
knn, k := findKNN(limit)
if knn == nil || k < 0 {
return false
}
// limit == (something nonnegative) - k. That subtraction can't underflow, so
// we can trust it.
if inclusive {
// ind <= knn - k cannot overflow if step is at most k
return step <= k
}
// ind < knn - k cannot overflow if step is at most k+1
return step <= k+1 && k != math.MaxInt64
} else { // step < 0
if limit.Op == OpConst64 {
// Figure out the actual smallest value.
v := limit.AuxInt
if !inclusive {
if v == math.MaxInt64 {
return false // > maxint is never satisfiable.
}
v++
}
if init.Op == OpConst64 {
// Use stride to compute a better lower limit.
if init.AuxInt < v {
return false
}
v = subU(init.AuxInt, diff(init.AuxInt, v)/uint64(-step)*uint64(-step))
}
// It is ok if we can't underflow when decrementing from the smallest value.
return !subWillUnderflow(v, -step)
}
if step == -1 && !inclusive {
// Can't underflow because minint is never a possible value.
return true
} }
} }
return false
// TODO: other unrolling idioms }
// for i := 0; i < KNN - KNN % k ; i += k
// for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
// for i := 0; i < KNN&(-k) ; i += k // k a power of 2
if !ok { if ok() {
continue flags := indVarFlags(0)
var min, max *Value
if step > 0 {
min = init
max = limit
if inclusive {
flags |= indVarMaxInc
}
} else {
min = limit
max = init
flags |= indVarMaxInc
if !inclusive {
flags |= indVarMinExc
}
step = -step
} }
if f.pass.debug >= 1 {
printIndVar(b, ind, min, max, step, flags)
}
iv = append(iv, indVar{
ind: ind,
min: min,
max: max,
entry: b.Succs[0].b,
flags: flags,
})
b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
} }
if f.pass.debug >= 1 { // TODO: other unrolling idioms
printIndVar(b, ind, min, max, step, flags) // for i := 0; i < KNN - KNN % k ; i += k
} // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
// for i := 0; i < KNN&(-k) ; i += k // k a power of 2
iv = append(iv, indVar{
ind: ind,
min: min,
max: max,
entry: b.Succs[0].b,
flags: flags,
})
b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
} }
return iv return iv
} }
func dropAdd64(v *Value) (*Value, int64) { // addWillOverflow reports whether x+y would result in a value more than maxint.
if v.Op == OpAdd64 && v.Args[0].Op == OpConst64 { func addWillOverflow(x, y int64) bool {
return v.Args[1], v.Args[0].AuxInt return x+y < x
}
// subWillUnderflow reports whether x-y would result in a value less than minint.
func subWillUnderflow(x, y int64) bool {
return x-y > x
}
// diff returns x-y as a uint64. Requires x>=y.
func diff(x, y int64) uint64 {
if x < y {
base.Fatalf("diff %d - %d underflowed", x, y)
} }
if v.Op == OpAdd64 && v.Args[1].Op == OpConst64 { return uint64(x - y)
return v.Args[0], v.Args[1].AuxInt }
// addU returns x+y. Requires that x+y does not overflow an int64.
func addU(x int64, y uint64) int64 {
if y >= 1<<63 {
if x >= 0 {
base.Fatalf("addU overflowed %d + %d", x, y)
}
x += 1<<63 - 1
x += 1
y -= 1 << 63
} }
return v, 0 if addWillOverflow(x, int64(y)) {
base.Fatalf("addU overflowed %d + %d", x, y)
}
return x + int64(y)
}
// subU returns x-y. Requires that x-y does not underflow an int64.
func subU(x int64, y uint64) int64 {
if y >= 1<<63 {
if x < 0 {
base.Fatalf("subU underflowed %d - %d", x, y)
}
x -= 1<<63 - 1
x -= 1
y -= 1 << 63
}
if subWillUnderflow(x, int64(y)) {
base.Fatalf("subU underflowed %d - %d", x, y)
}
return x - int64(y)
}
// if v is known to be x - c, where x is known to be nonnegative and c is a
// constant, return x, c. Otherwise return nil, 0.
func findKNN(v *Value) (*Value, int64) {
var x, y *Value
x = v
switch v.Op {
case OpSub64:
x = v.Args[0]
y = v.Args[1]
case OpAdd64:
x = v.Args[0]
y = v.Args[1]
if x.Op == OpConst64 {
x, y = y, x
}
}
switch x.Op {
case OpSliceLen, OpStringLen, OpSliceCap:
default:
return nil, 0
}
if y == nil {
return x, 0
}
if y.Op != OpConst64 {
return nil, 0
}
if v.Op == OpAdd64 {
return x, -y.AuxInt
}
return x, y.AuxInt
} }
func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) { func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) {

View file

@ -169,6 +169,8 @@ func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
} }
// Special case: sync/atomic.align64 is an empty struct we recognize // Special case: sync/atomic.align64 is an empty struct we recognize
// as a signal that the struct it contains must be 64-bit-aligned. // as a signal that the struct it contains must be 64-bit-aligned.
//
// This logic is duplicated in go/types and cmd/compile/internal/types2.
if isStruct && t.NumFields() == 0 && t.Sym() != nil && t.Sym().Name == "align64" && isAtomicStdPkg(t.Sym().Pkg) { if isStruct && t.NumFields() == 0 && t.Sym() != nil && t.Sym().Name == "align64" && isAtomicStdPkg(t.Sym().Pkg) {
maxalign = 8 maxalign = 8
} }

View file

@ -418,7 +418,8 @@ func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Packa
// AssertableTo reports whether a value of type V can be asserted to have type T. // AssertableTo reports whether a value of type V can be asserted to have type T.
// //
// The behavior of AssertableTo is undefined in two cases: // The behavior of AssertableTo is unspecified in three cases:
// - if T is Typ[Invalid]
// - if V is a generalized interface; i.e., an interface that may only be used // - if V is a generalized interface; i.e., an interface that may only be used
// as a type constraint in Go code // as a type constraint in Go code
// - if T is an uninstantiated generic type // - if T is an uninstantiated generic type
@ -434,8 +435,8 @@ func AssertableTo(V *Interface, T Type) bool {
// AssignableTo reports whether a value of type V is assignable to a variable // AssignableTo reports whether a value of type V is assignable to a variable
// of type T. // of type T.
// //
// The behavior of AssignableTo is undefined if V or T is an uninstantiated // The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
// generic type. // uninstantiated generic type.
func AssignableTo(V, T Type) bool { func AssignableTo(V, T Type) bool {
x := operand{mode: value, typ: V} x := operand{mode: value, typ: V}
ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
@ -445,8 +446,8 @@ func AssignableTo(V, T Type) bool {
// ConvertibleTo reports whether a value of type V is convertible to a value of // ConvertibleTo reports whether a value of type V is convertible to a value of
// type T. // type T.
// //
// The behavior of ConvertibleTo is undefined if V or T is an uninstantiated // The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
// generic type. // uninstantiated generic type.
func ConvertibleTo(V, T Type) bool { func ConvertibleTo(V, T Type) bool {
x := operand{mode: value, typ: V} x := operand{mode: value, typ: V}
return x.convertibleTo(nil, T, nil) // check not needed for non-constant x return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
@ -454,8 +455,8 @@ func ConvertibleTo(V, T Type) bool {
// Implements reports whether type V implements interface T. // Implements reports whether type V implements interface T.
// //
// The behavior of Implements is undefined if V is an uninstantiated generic // The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
// type. // generic type.
func Implements(V Type, T *Interface) bool { func Implements(V Type, T *Interface) bool {
if T.Empty() { if T.Empty() {
// All types (even Typ[Invalid]) implement the empty interface. // All types (even Typ[Invalid]) implement the empty interface.

View file

@ -53,6 +53,17 @@ func (s *StdSizes) Alignof(T Type) int64 {
// is the same as unsafe.Alignof(x[0]), but at least 1." // is the same as unsafe.Alignof(x[0]), but at least 1."
return s.Alignof(t.elem) return s.Alignof(t.elem)
case *Struct: case *Struct:
if len(t.fields) == 0 && isSyncAtomicAlign64(T) {
// Special case: sync/atomic.align64 is an
// empty struct we recognize as a signal that
// the struct it contains must be
// 64-bit-aligned.
//
// This logic is equivalent to the logic in
// cmd/compile/internal/types/size.go:calcStructOffset
return 8
}
// spec: "For a variable x of struct type: unsafe.Alignof(x) // spec: "For a variable x of struct type: unsafe.Alignof(x)
// is the largest of the values unsafe.Alignof(x.f) for each // is the largest of the values unsafe.Alignof(x.f) for each
// field f of x, but at least 1." // field f of x, but at least 1."
@ -93,6 +104,18 @@ func (s *StdSizes) Alignof(T Type) int64 {
return a return a
} }
func isSyncAtomicAlign64(T Type) bool {
named, ok := T.(*Named)
if !ok {
return false
}
obj := named.Obj()
return obj.Name() == "align64" &&
obj.Pkg() != nil &&
(obj.Pkg().Path() == "sync/atomic" ||
obj.Pkg().Path() == "runtime/internal/atomic")
}
func (s *StdSizes) Offsetsof(fields []*Var) []int64 { func (s *StdSizes) Offsetsof(fields []*Var) []int64 {
offsets := make([]int64, len(fields)) offsets := make([]int64, len(fields))
var o int64 var o int64

View file

@ -14,12 +14,15 @@ import (
// findStructType typechecks src and returns the first struct type encountered. // findStructType typechecks src and returns the first struct type encountered.
func findStructType(t *testing.T, src string) *types2.Struct { func findStructType(t *testing.T, src string) *types2.Struct {
return findStructTypeConfig(t, src, &types2.Config{})
}
func findStructTypeConfig(t *testing.T, src string, conf *types2.Config) *types2.Struct {
f, err := parseSrc("x.go", src) f, err := parseSrc("x.go", src)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)} info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)}
var conf types2.Config
_, err = conf.Check("x", []*syntax.File{f}, &info) _, err = conf.Check("x", []*syntax.File{f}, &info)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -105,3 +108,39 @@ const _ = unsafe.Offsetof(struct{ x int64 }{}.x)
_ = conf.Sizes.Alignof(tv.Type) _ = conf.Sizes.Alignof(tv.Type)
} }
} }
// Issue #53884.
func TestAtomicAlign(t *testing.T) {
const src = `
package main
import "sync/atomic"
var s struct {
x int32
y atomic.Int64
z int64
}
`
want := []int64{0, 8, 16}
for _, arch := range []string{"386", "amd64"} {
t.Run(arch, func(t *testing.T) {
conf := types2.Config{
Importer: defaultImporter(),
Sizes: types2.SizesFor("gc", arch),
}
ts := findStructTypeConfig(t, src, &conf)
var fields []*types2.Var
// Make a copy manually :(
for i := 0; i < ts.NumFields(); i++ {
fields = append(fields, ts.Field(i))
}
offsets := conf.Sizes.Offsetsof(fields)
if offsets[0] != want[0] || offsets[1] != want[1] || offsets[2] != want[2] {
t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, want)
}
})
}
}

View file

@ -31,10 +31,8 @@ type x7[A any] struct{ foo7 }
func main7() { var _ foo7 = x7[int]{} } func main7() { var _ foo7 = x7[int]{} }
// crash 8 // crash 8
// Embedding stand-alone type parameters is not permitted for now. Disabled. type foo8[A any] interface { ~A /* ERROR cannot be a type parameter */ }
// type foo8[A any] interface { ~A } func bar8[A foo8[A]](a A) {}
// func bar8[A foo8[A]](a A) {}
// func main8() {}
// crash 9 // crash 9
type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] } type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
@ -74,10 +72,9 @@ func F20[t Z20]() { F20(t /* ERROR invalid composite literal type */ {}) }
type Z21 /* ERROR illegal cycle */ interface{ Z21 } type Z21 /* ERROR illegal cycle */ interface{ Z21 }
func F21[T Z21]() { ( /* ERROR not used */ F21[Z21]) } func F21[T Z21]() { ( /* ERROR not used */ F21[Z21]) }
// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639). // crash 24
// // crash 24 type T24[P any] P // ERROR cannot use a type parameter as RHS in type declaration
// type T24[P any] P func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
// func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
// crash 25 // crash 25
type T25[A any] int type T25[A any] int

View file

@ -63,7 +63,7 @@ func order(fn *ir.Func) {
s := fmt.Sprintf("\nbefore order %v", fn.Sym()) s := fmt.Sprintf("\nbefore order %v", fn.Sym())
ir.DumpList(s, fn.Body) ir.DumpList(s, fn.Body)
} }
ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
orderBlock(&fn.Body, map[string][]*ir.Name{}) orderBlock(&fn.Body, map[string][]*ir.Name{})
} }
@ -477,6 +477,12 @@ func (o *orderState) edge() {
// and then replaces the old slice in n with the new slice. // and then replaces the old slice in n with the new slice.
// free is a map that can be used to obtain temporary variables by type. // free is a map that can be used to obtain temporary variables by type.
func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) { func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
if len(*n) != 0 {
// Set reasonable position for instrumenting code. See issue 53688.
// It would be nice if ir.Nodes had a position (the opening {, probably),
// but it doesn't. So we use the first statement's position instead.
ir.SetPos((*n)[0])
}
var order orderState var order orderState
order.free = free order.free = free
mark := order.markTemp() mark := order.markTemp()

View file

@ -204,6 +204,8 @@ func bootstrapBuildTools() {
// https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ // https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ
// Use the math_big_pure_go build tag to disable the assembly in math/big // Use the math_big_pure_go build tag to disable the assembly in math/big
// which may contain unsupported instructions. // which may contain unsupported instructions.
// Use the purego build tag to disable other assembly code,
// such as in cmd/internal/notsha256.
// Note that if we are using Go 1.10 or later as bootstrap, the -gcflags=-l // Note that if we are using Go 1.10 or later as bootstrap, the -gcflags=-l
// only applies to the final cmd/go binary, but that's OK: if this is Go 1.10 // only applies to the final cmd/go binary, but that's OK: if this is Go 1.10
// or later we don't need to disable inlining to work around bugs in the Go 1.4 compiler. // or later we don't need to disable inlining to work around bugs in the Go 1.4 compiler.
@ -211,7 +213,7 @@ func bootstrapBuildTools() {
pathf("%s/bin/go", goroot_bootstrap), pathf("%s/bin/go", goroot_bootstrap),
"install", "install",
"-gcflags=-l", "-gcflags=-l",
"-tags=math_big_pure_go compiler_bootstrap", "-tags=math_big_pure_go compiler_bootstrap purego",
} }
if vflag > 0 { if vflag > 0 {
cmd = append(cmd, "-v") cmd = append(cmd, "-v")

View file

@ -195,11 +195,10 @@
// For example, when building with a non-standard configuration, // For example, when building with a non-standard configuration,
// use -pkgdir to keep generated packages in a separate location. // use -pkgdir to keep generated packages in a separate location.
// -tags tag,list // -tags tag,list
// a comma-separated list of build tags to consider satisfied during the // a comma-separated list of additional build tags to consider satisfied
// build. For more information about build tags, see the description of // during the build. For more information about build tags, see
// build constraints in the documentation for the go/build package. // 'go help buildconstraint'. (Earlier versions of Go used a
// (Earlier versions of Go used a space-separated list, and that form // space-separated list, and that form is deprecated but still recognized.)
// is deprecated but still recognized.)
// -trimpath // -trimpath
// remove all file system paths from the resulting executable. // remove all file system paths from the resulting executable.
// Instead of absolute file system paths, the recorded file names // Instead of absolute file system paths, the recorded file names
@ -931,6 +930,7 @@
// //
// type Module struct { // type Module struct {
// Path string // module path // Path string // module path
// Query string // version query corresponding to this version
// Version string // module version // Version string // module version
// Versions []string // available module versions // Versions []string // available module versions
// Replace *Module // replaced by this module // Replace *Module // replaced by this module
@ -944,6 +944,8 @@
// Retracted []string // retraction information, if any (with -retracted or -u) // Retracted []string // retraction information, if any (with -retracted or -u)
// Deprecated string // deprecation message, if any (with -u) // Deprecated string // deprecation message, if any (with -u)
// Error *ModuleError // error loading module // Error *ModuleError // error loading module
// Origin any // provenance of module
// Reuse bool // reuse of old module info is safe
// } // }
// //
// type ModuleError struct { // type ModuleError struct {
@ -1020,6 +1022,16 @@
// module as a Module struct. If an error occurs, the result will // module as a Module struct. If an error occurs, the result will
// be a Module struct with a non-nil Error field. // be a Module struct with a non-nil Error field.
// //
// When using -m, the -reuse=old.json flag accepts the name of file containing
// the JSON output of a previous 'go list -m -json' invocation with the
// same set of modifier flags (such as -u, -retracted, and -versions).
// The go command may use this file to determine that a module is unchanged
// since the previous invocation and avoid redownloading information about it.
// Modules that are not redownloaded will be marked in the new output by
// setting the Reuse field to true. Normally the module cache provides this
// kind of reuse automatically; the -reuse flag can be useful on systems that
// do not preserve the module cache.
//
// For more about build flags, see 'go help build'. // For more about build flags, see 'go help build'.
// //
// For more about specifying packages, see 'go help packages'. // For more about specifying packages, see 'go help packages'.
@ -1056,7 +1068,7 @@
// //
// Usage: // Usage:
// //
// go mod download [-x] [-json] [modules] // go mod download [-x] [-json] [-reuse=old.json] [modules]
// //
// Download downloads the named modules, which can be module patterns selecting // Download downloads the named modules, which can be module patterns selecting
// dependencies of the main module or module queries of the form path@version. // dependencies of the main module or module queries of the form path@version.
@ -1079,6 +1091,7 @@
// //
// type Module struct { // type Module struct {
// Path string // module path // Path string // module path
// Query string // version query corresponding to this version
// Version string // module version // Version string // module version
// Error string // error loading module // Error string // error loading module
// Info string // absolute path to cached .info file // Info string // absolute path to cached .info file
@ -1087,8 +1100,18 @@
// Dir string // absolute path to cached source root directory // Dir string // absolute path to cached source root directory
// Sum string // checksum for path, version (as in go.sum) // Sum string // checksum for path, version (as in go.sum)
// GoModSum string // checksum for go.mod (as in go.sum) // GoModSum string // checksum for go.mod (as in go.sum)
// Origin any // provenance of module
// Reuse bool // reuse of old module info is safe
// } // }
// //
// The -reuse flag accepts the name of file containing the JSON output of a
// previous 'go mod download -json' invocation. The go command may use this
// file to determine that a module is unchanged since the previous invocation
// and avoid redownloading it. Modules that are not redownloaded will be marked
// in the new output by setting the Reuse field to true. Normally the module
// cache provides this kind of reuse automatically; the -reuse flag can be
// useful on systems that do not preserve the module cache.
//
// The -x flag causes download to print the commands download executes. // The -x flag causes download to print the commands download executes.
// //
// See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'. // See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'.
@ -1797,11 +1820,12 @@
// //
// # Build constraints // # Build constraints
// //
// A build constraint, also known as a build tag, is a line comment that begins // A build constraint, also known as a build tag, is a condition under which a
// file should be included in the package. Build constraints are given by a
// line comment that begins
// //
// //go:build // //go:build
// //
// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but // Constraints may appear in any kind of source file (not just Go), but
// they must appear near the top of the file, preceded // they must appear near the top of the file, preceded
// only by blank lines and other line comments. These rules mean that in Go // only by blank lines and other line comments. These rules mean that in Go
@ -1810,9 +1834,9 @@
// To distinguish build constraints from package documentation, // To distinguish build constraints from package documentation,
// a build constraint should be followed by a blank line. // a build constraint should be followed by a blank line.
// //
// A build constraint is evaluated as an expression containing options // A build constraint comment is evaluated as an expression containing
// combined by ||, &&, and ! operators and parentheses. Operators have // build tags combined by ||, &&, and ! operators and parentheses.
// the same meaning as in Go. // Operators have the same meaning as in Go.
// //
// For example, the following build constraint constrains a file to // For example, the following build constraint constrains a file to
// build when the "linux" and "386" constraints are satisfied, or when // build when the "linux" and "386" constraints are satisfied, or when
@ -1822,7 +1846,7 @@
// //
// It is an error for a file to have more than one //go:build line. // It is an error for a file to have more than one //go:build line.
// //
// During a particular build, the following words are satisfied: // During a particular build, the following build tags are satisfied:
// //
// - the target operating system, as spelled by runtime.GOOS, set with the // - the target operating system, as spelled by runtime.GOOS, set with the
// GOOS environment variable. // GOOS environment variable.

View file

@ -991,21 +991,6 @@ func TestIssue10952(t *testing.T) {
tg.run("get", "-d", "-u", importPath) tg.run("get", "-d", "-u", importPath)
} }
func TestIssue16471(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
testenv.MustHaveExecPath(t, "git")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
tg.setenv("GOPATH", tg.path("."))
tg.must(os.MkdirAll(tg.path("src/rsc.io/go-get-issue-10952"), 0755))
tg.runGit(tg.path("src/rsc.io"), "clone", "https://github.com/zombiezen/go-get-issue-10952")
tg.runFail("get", "-u", "rsc.io/go-get-issue-10952")
tg.grepStderr("rsc.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/zombiezen/go-get-issue-10952", "did not detect updated import path")
}
// Test git clone URL that uses SCP-like syntax and custom import path checking. // Test git clone URL that uses SCP-like syntax and custom import path checking.
func TestIssue11457(t *testing.T) { func TestIssue11457(t *testing.T) {
testenv.MustHaveExternalNetwork(t) testenv.MustHaveExternalNetwork(t)
@ -1363,6 +1348,15 @@ func tempEnvName() string {
} }
} }
func pathEnvName() string {
switch runtime.GOOS {
case "plan9":
return "path"
default:
return "PATH"
}
}
func TestDefaultGOPATH(t *testing.T) { func TestDefaultGOPATH(t *testing.T) {
tg := testgo(t) tg := testgo(t)
defer tg.cleanup() defer tg.cleanup()

View file

@ -6,16 +6,65 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"internal/godebug"
"io/fs" "io/fs"
"io/ioutil" "io/ioutil"
"log"
"os" "os"
pathpkg "path"
"path/filepath" "path/filepath"
"runtime" "runtime"
"runtime/debug"
"sort" "sort"
"strings" "strings"
"sync"
"time" "time"
) )
// Trace emits a trace event for the operation and file path to the trace log,
// but only when $GODEBUG contains gofsystrace=1.
// The traces are appended to the file named by the $GODEBUG setting gofsystracelog, or else standard error.
// For debugging, if the $GODEBUG setting gofsystracestack is non-empty, then trace events for paths
// matching that glob pattern (using path.Match) will be followed by a full stack trace.
func Trace(op, path string) {
if !doTrace {
return
}
traceMu.Lock()
defer traceMu.Unlock()
fmt.Fprintf(traceFile, "%d gofsystrace %s %s\n", os.Getpid(), op, path)
if traceStack != "" {
if match, _ := pathpkg.Match(traceStack, path); match {
traceFile.Write(debug.Stack())
}
}
}
var (
doTrace bool
traceStack string
traceFile *os.File
traceMu sync.Mutex
)
func init() {
if godebug.Get("gofsystrace") != "1" {
return
}
doTrace = true
traceStack = godebug.Get("gofsystracestack")
if f := godebug.Get("gofsystracelog"); f != "" {
// Note: No buffering on writes to this file, so no need to worry about closing it at exit.
var err error
traceFile, err = os.OpenFile(f, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Fatal(err)
}
} else {
traceFile = os.Stderr
}
}
// OverlayFile is the path to a text file in the OverlayJSON format. // OverlayFile is the path to a text file in the OverlayJSON format.
// It is the value of the -overlay flag. // It is the value of the -overlay flag.
var OverlayFile string var OverlayFile string
@ -86,6 +135,7 @@ func Init(wd string) error {
return nil return nil
} }
Trace("ReadFile", OverlayFile)
b, err := os.ReadFile(OverlayFile) b, err := os.ReadFile(OverlayFile)
if err != nil { if err != nil {
return fmt.Errorf("reading overlay file: %v", err) return fmt.Errorf("reading overlay file: %v", err)
@ -191,6 +241,7 @@ func initFromJSON(overlayJSON OverlayJSON) error {
// IsDir returns true if path is a directory on disk or in the // IsDir returns true if path is a directory on disk or in the
// overlay. // overlay.
func IsDir(path string) (bool, error) { func IsDir(path string) (bool, error) {
Trace("IsDir", path)
path = canonicalize(path) path = canonicalize(path)
if _, ok := parentIsOverlayFile(path); ok { if _, ok := parentIsOverlayFile(path); ok {
@ -260,6 +311,7 @@ func readDir(dir string) ([]fs.FileInfo, error) {
// ReadDir provides a slice of fs.FileInfo entries corresponding // ReadDir provides a slice of fs.FileInfo entries corresponding
// to the overlaid files in the directory. // to the overlaid files in the directory.
func ReadDir(dir string) ([]fs.FileInfo, error) { func ReadDir(dir string) ([]fs.FileInfo, error) {
Trace("ReadDir", dir)
dir = canonicalize(dir) dir = canonicalize(dir)
if _, ok := parentIsOverlayFile(dir); ok { if _, ok := parentIsOverlayFile(dir); ok {
return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir} return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
@ -327,11 +379,17 @@ func OverlayPath(path string) (string, bool) {
// Open opens the file at or overlaid on the given path. // Open opens the file at or overlaid on the given path.
func Open(path string) (*os.File, error) { func Open(path string) (*os.File, error) {
return OpenFile(path, os.O_RDONLY, 0) Trace("Open", path)
return openFile(path, os.O_RDONLY, 0)
} }
// OpenFile opens the file at or overlaid on the given path with the flag and perm. // OpenFile opens the file at or overlaid on the given path with the flag and perm.
func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) { func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
Trace("OpenFile", path)
return openFile(path, flag, perm)
}
func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
cpath := canonicalize(path) cpath := canonicalize(path)
if node, ok := overlay[cpath]; ok { if node, ok := overlay[cpath]; ok {
// Opening a file in the overlay. // Opening a file in the overlay.
@ -360,6 +418,7 @@ func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
// IsDirWithGoFiles reports whether dir is a directory containing Go files // IsDirWithGoFiles reports whether dir is a directory containing Go files
// either on disk or in the overlay. // either on disk or in the overlay.
func IsDirWithGoFiles(dir string) (bool, error) { func IsDirWithGoFiles(dir string) (bool, error) {
Trace("IsDirWithGoFiles", dir)
fis, err := ReadDir(dir) fis, err := ReadDir(dir)
if os.IsNotExist(err) || errors.Is(err, errNotDir) { if os.IsNotExist(err) || errors.Is(err, errNotDir) {
return false, nil return false, nil
@ -405,28 +464,20 @@ func IsDirWithGoFiles(dir string) (bool, error) {
// walk recursively descends path, calling walkFn. Copied, with some // walk recursively descends path, calling walkFn. Copied, with some
// modifications from path/filepath.walk. // modifications from path/filepath.walk.
func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error { func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
if !info.IsDir() { if err := walkFn(path, info, nil); err != nil || !info.IsDir() {
return walkFn(path, info, nil) return err
} }
fis, readErr := ReadDir(path) fis, err := ReadDir(path)
walkErr := walkFn(path, info, readErr) if err != nil {
// If readErr != nil, walk can't walk into this directory. return walkFn(path, info, err)
// walkErr != nil means walkFn want walk to skip this directory or stop walking.
// Therefore, if one of readErr and walkErr isn't nil, walk will return.
if readErr != nil || walkErr != nil {
// The caller's behavior is controlled by the return value, which is decided
// by walkFn. walkFn may ignore readErr and return nil.
// If walkFn returns SkipDir, it will be handled by the caller.
// So walk should return whatever walkFn returns.
return walkErr
} }
for _, fi := range fis { for _, fi := range fis {
filename := filepath.Join(path, fi.Name()) filename := filepath.Join(path, fi.Name())
if walkErr = walk(filename, fi, walkFn); walkErr != nil { if err := walk(filename, fi, walkFn); err != nil {
if !fi.IsDir() || walkErr != filepath.SkipDir { if !fi.IsDir() || err != filepath.SkipDir {
return walkErr return err
} }
} }
} }
@ -436,6 +487,7 @@ func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
// Walk walks the file tree rooted at root, calling walkFn for each file or // Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. // directory in the tree, including root.
func Walk(root string, walkFn filepath.WalkFunc) error { func Walk(root string, walkFn filepath.WalkFunc) error {
Trace("Walk", root)
info, err := Lstat(root) info, err := Lstat(root)
if err != nil { if err != nil {
err = walkFn(root, nil, err) err = walkFn(root, nil, err)
@ -450,11 +502,13 @@ func Walk(root string, walkFn filepath.WalkFunc) error {
// lstat implements a version of os.Lstat that operates on the overlay filesystem. // lstat implements a version of os.Lstat that operates on the overlay filesystem.
func Lstat(path string) (fs.FileInfo, error) { func Lstat(path string) (fs.FileInfo, error) {
Trace("Lstat", path)
return overlayStat(path, os.Lstat, "lstat") return overlayStat(path, os.Lstat, "lstat")
} }
// Stat implements a version of os.Stat that operates on the overlay filesystem. // Stat implements a version of os.Stat that operates on the overlay filesystem.
func Stat(path string) (fs.FileInfo, error) { func Stat(path string) (fs.FileInfo, error) {
Trace("Stat", path)
return overlayStat(path, os.Stat, "stat") return overlayStat(path, os.Stat, "stat")
} }
@ -528,6 +582,7 @@ func (f fakeDir) Sys() any { return nil }
// Glob is like filepath.Glob but uses the overlay file system. // Glob is like filepath.Glob but uses the overlay file system.
func Glob(pattern string) (matches []string, err error) { func Glob(pattern string) (matches []string, err error) {
Trace("Glob", pattern)
// Check pattern is well-formed. // Check pattern is well-formed.
if _, err := filepath.Match(pattern, ""); err != nil { if _, err := filepath.Match(pattern, ""); err != nil {
return nil, err return nil, err

View file

@ -812,11 +812,12 @@ var HelpBuildConstraint = &base.Command{
UsageLine: "buildconstraint", UsageLine: "buildconstraint",
Short: "build constraints", Short: "build constraints",
Long: ` Long: `
A build constraint, also known as a build tag, is a line comment that begins A build constraint, also known as a build tag, is a condition under which a
file should be included in the package. Build constraints are given by a
line comment that begins
//go:build //go:build
that lists the conditions under which a file should be included in the package.
Constraints may appear in any kind of source file (not just Go), but Constraints may appear in any kind of source file (not just Go), but
they must appear near the top of the file, preceded they must appear near the top of the file, preceded
only by blank lines and other line comments. These rules mean that in Go only by blank lines and other line comments. These rules mean that in Go
@ -825,9 +826,9 @@ files a build constraint must appear before the package clause.
To distinguish build constraints from package documentation, To distinguish build constraints from package documentation,
a build constraint should be followed by a blank line. a build constraint should be followed by a blank line.
A build constraint is evaluated as an expression containing options A build constraint comment is evaluated as an expression containing
combined by ||, &&, and ! operators and parentheses. Operators have build tags combined by ||, &&, and ! operators and parentheses.
the same meaning as in Go. Operators have the same meaning as in Go.
For example, the following build constraint constrains a file to For example, the following build constraint constrains a file to
build when the "linux" and "386" constraints are satisfied, or when build when the "linux" and "386" constraints are satisfied, or when
@ -837,7 +838,7 @@ build when the "linux" and "386" constraints are satisfied, or when
It is an error for a file to have more than one //go:build line. It is an error for a file to have more than one //go:build line.
During a particular build, the following words are satisfied: During a particular build, the following build tags are satisfied:
- the target operating system, as spelled by runtime.GOOS, set with the - the target operating system, as spelled by runtime.GOOS, set with the
GOOS environment variable. GOOS environment variable.

View file

@ -223,6 +223,7 @@ applied to a Go struct, but now a Module struct:
type Module struct { type Module struct {
Path string // module path Path string // module path
Query string // version query corresponding to this version
Version string // module version Version string // module version
Versions []string // available module versions Versions []string // available module versions
Replace *Module // replaced by this module Replace *Module // replaced by this module
@ -236,6 +237,8 @@ applied to a Go struct, but now a Module struct:
Retracted []string // retraction information, if any (with -retracted or -u) Retracted []string // retraction information, if any (with -retracted or -u)
Deprecated string // deprecation message, if any (with -u) Deprecated string // deprecation message, if any (with -u)
Error *ModuleError // error loading module Error *ModuleError // error loading module
Origin any // provenance of module
Reuse bool // reuse of old module info is safe
} }
type ModuleError struct { type ModuleError struct {
@ -312,6 +315,16 @@ that must be a module path or query and returns the specified
module as a Module struct. If an error occurs, the result will module as a Module struct. If an error occurs, the result will
be a Module struct with a non-nil Error field. be a Module struct with a non-nil Error field.
When using -m, the -reuse=old.json flag accepts the name of file containing
the JSON output of a previous 'go list -m -json' invocation with the
same set of modifier flags (such as -u, -retracted, and -versions).
The go command may use this file to determine that a module is unchanged
since the previous invocation and avoid redownloading information about it.
Modules that are not redownloaded will be marked in the new output by
setting the Reuse field to true. Normally the module cache provides this
kind of reuse automatically; the -reuse flag can be useful on systems that
do not preserve the module cache.
For more about build flags, see 'go help build'. For more about build flags, see 'go help build'.
For more about specifying packages, see 'go help packages'. For more about specifying packages, see 'go help packages'.
@ -337,6 +350,7 @@ var (
listJsonFields jsonFlag // If not empty, only output these fields. listJsonFields jsonFlag // If not empty, only output these fields.
listM = CmdList.Flag.Bool("m", false, "") listM = CmdList.Flag.Bool("m", false, "")
listRetracted = CmdList.Flag.Bool("retracted", false, "") listRetracted = CmdList.Flag.Bool("retracted", false, "")
listReuse = CmdList.Flag.String("reuse", "", "")
listTest = CmdList.Flag.Bool("test", false, "") listTest = CmdList.Flag.Bool("test", false, "")
listU = CmdList.Flag.Bool("u", false, "") listU = CmdList.Flag.Bool("u", false, "")
listVersions = CmdList.Flag.Bool("versions", false, "") listVersions = CmdList.Flag.Bool("versions", false, "")
@ -398,6 +412,12 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if *listFmt != "" && listJson == true { if *listFmt != "" && listJson == true {
base.Fatalf("go list -f cannot be used with -json") base.Fatalf("go list -f cannot be used with -json")
} }
if *listReuse != "" && !*listM {
base.Fatalf("go list -reuse cannot be used without -m")
}
if *listReuse != "" && modload.HasModRoot() {
base.Fatalf("go list -reuse cannot be used inside a module")
}
work.BuildInit() work.BuildInit()
out := newTrackingWriter(os.Stdout) out := newTrackingWriter(os.Stdout)
@ -532,7 +552,10 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
mode |= modload.ListRetractedVersions mode |= modload.ListRetractedVersions
} }
} }
mods, err := modload.ListModules(ctx, args, mode) if *listReuse != "" && len(args) == 0 {
base.Fatalf("go: list -m -reuse only has an effect with module@version arguments")
}
mods, err := modload.ListModules(ctx, args, mode, *listReuse)
if !*listE { if !*listE {
for _, m := range mods { for _, m := range mods {
if m.Error != nil { if m.Error != nil {
@ -783,7 +806,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if *listRetracted { if *listRetracted {
mode |= modload.ListRetracted mode |= modload.ListRetracted
} }
rmods, err := modload.ListModules(ctx, args, mode) rmods, err := modload.ListModules(ctx, args, mode, *listReuse)
if err != nil && !*listE { if err != nil && !*listE {
base.Errorf("go: %v", err) base.Errorf("go: %v", err)
} }

View file

@ -877,7 +877,14 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
if !cfg.ModulesEnabled { if !cfg.ModulesEnabled {
buildMode = build.ImportComment buildMode = build.ImportComment
} }
if modroot := modload.PackageModRoot(ctx, r.path); modroot != "" { modroot := modload.PackageModRoot(ctx, r.path)
if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) {
modroot = cfg.GOROOTsrc
if str.HasPathPrefix(r.dir, cfg.GOROOTsrc+string(filepath.Separator)+"cmd") {
modroot += string(filepath.Separator) + "cmd"
}
}
if modroot != "" {
if rp, err := modindex.GetPackage(modroot, r.dir); err == nil { if rp, err := modindex.GetPackage(modroot, r.dir); err == nil {
data.p, data.err = rp.Import(cfg.BuildContext, buildMode) data.p, data.err = rp.Import(cfg.BuildContext, buildMode)
goto Happy goto Happy

View file

@ -13,6 +13,7 @@ import (
"cmd/go/internal/base" "cmd/go/internal/base"
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/modfetch" "cmd/go/internal/modfetch"
"cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modload" "cmd/go/internal/modload"
"golang.org/x/mod/module" "golang.org/x/mod/module"
@ -20,7 +21,7 @@ import (
) )
var cmdDownload = &base.Command{ var cmdDownload = &base.Command{
UsageLine: "go mod download [-x] [-json] [modules]", UsageLine: "go mod download [-x] [-json] [-reuse=old.json] [modules]",
Short: "download modules to local cache", Short: "download modules to local cache",
Long: ` Long: `
Download downloads the named modules, which can be module patterns selecting Download downloads the named modules, which can be module patterns selecting
@ -44,6 +45,7 @@ corresponding to this Go struct:
type Module struct { type Module struct {
Path string // module path Path string // module path
Query string // version query corresponding to this version
Version string // module version Version string // module version
Error string // error loading module Error string // error loading module
Info string // absolute path to cached .info file Info string // absolute path to cached .info file
@ -52,8 +54,18 @@ corresponding to this Go struct:
Dir string // absolute path to cached source root directory Dir string // absolute path to cached source root directory
Sum string // checksum for path, version (as in go.sum) Sum string // checksum for path, version (as in go.sum)
GoModSum string // checksum for go.mod (as in go.sum) GoModSum string // checksum for go.mod (as in go.sum)
Origin any // provenance of module
Reuse bool // reuse of old module info is safe
} }
The -reuse flag accepts the name of file containing the JSON output of a
previous 'go mod download -json' invocation. The go command may use this
file to determine that a module is unchanged since the previous invocation
and avoid redownloading it. Modules that are not redownloaded will be marked
in the new output by setting the Reuse field to true. Normally the module
cache provides this kind of reuse automatically; the -reuse flag can be
useful on systems that do not preserve the module cache.
The -x flag causes download to print the commands download executes. The -x flag causes download to print the commands download executes.
See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'. See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'.
@ -62,7 +74,10 @@ See https://golang.org/ref/mod#version-queries for more about version queries.
`, `,
} }
var downloadJSON = cmdDownload.Flag.Bool("json", false, "") var (
downloadJSON = cmdDownload.Flag.Bool("json", false, "")
downloadReuse = cmdDownload.Flag.String("reuse", "", "")
)
func init() { func init() {
cmdDownload.Run = runDownload // break init cycle cmdDownload.Run = runDownload // break init cycle
@ -75,6 +90,7 @@ func init() {
type moduleJSON struct { type moduleJSON struct {
Path string `json:",omitempty"` Path string `json:",omitempty"`
Version string `json:",omitempty"` Version string `json:",omitempty"`
Query string `json:",omitempty"`
Error string `json:",omitempty"` Error string `json:",omitempty"`
Info string `json:",omitempty"` Info string `json:",omitempty"`
GoMod string `json:",omitempty"` GoMod string `json:",omitempty"`
@ -82,6 +98,9 @@ type moduleJSON struct {
Dir string `json:",omitempty"` Dir string `json:",omitempty"`
Sum string `json:",omitempty"` Sum string `json:",omitempty"`
GoModSum string `json:",omitempty"` GoModSum string `json:",omitempty"`
Origin *codehost.Origin `json:",omitempty"`
Reuse bool `json:",omitempty"`
} }
func runDownload(ctx context.Context, cmd *base.Command, args []string) { func runDownload(ctx context.Context, cmd *base.Command, args []string) {
@ -148,12 +167,12 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
} }
downloadModule := func(m *moduleJSON) { downloadModule := func(m *moduleJSON) {
var err error _, file, err := modfetch.InfoFile(m.Path, m.Version)
m.Info, err = modfetch.InfoFile(m.Path, m.Version)
if err != nil { if err != nil {
m.Error = err.Error() m.Error = err.Error()
return return
} }
m.Info = file
m.GoMod, err = modfetch.GoModFile(m.Path, m.Version) m.GoMod, err = modfetch.GoModFile(m.Path, m.Version)
if err != nil { if err != nil {
m.Error = err.Error() m.Error = err.Error()
@ -179,15 +198,21 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
} }
var mods []*moduleJSON var mods []*moduleJSON
if *downloadReuse != "" && modload.HasModRoot() {
base.Fatalf("go mod download -reuse cannot be used inside a module")
}
type token struct{} type token struct{}
sem := make(chan token, runtime.GOMAXPROCS(0)) sem := make(chan token, runtime.GOMAXPROCS(0))
infos, infosErr := modload.ListModules(ctx, args, 0) infos, infosErr := modload.ListModules(ctx, args, 0, *downloadReuse)
if !haveExplicitArgs { if !haveExplicitArgs && modload.WorkFilePath() == "" {
// 'go mod download' is sometimes run without arguments to pre-populate the // 'go mod download' is sometimes run without arguments to pre-populate the
// module cache. It may fetch modules that aren't needed to build packages // module cache. In modules that aren't at go 1.17 or higher, it may fetch
// in the main module. This is usually not intended, so don't save sums for // modules that aren't needed to build packages in the main module. This is
// downloaded modules (golang.org/issue/45332). We do still fix // usually not intended, so don't save sums for downloaded modules
// inconsistencies in go.mod though. // (golang.org/issue/45332). We do still fix inconsistencies in go.mod
// though.
// //
// TODO(#45551): In the future, report an error if go.mod or go.sum need to // TODO(#45551): In the future, report an error if go.mod or go.sum need to
// be updated after loading the build list. This may require setting // be updated after loading the build list. This may require setting
@ -209,12 +234,18 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
m := &moduleJSON{ m := &moduleJSON{
Path: info.Path, Path: info.Path,
Version: info.Version, Version: info.Version,
Query: info.Query,
Reuse: info.Reuse,
Origin: info.Origin,
} }
mods = append(mods, m) mods = append(mods, m)
if info.Error != nil { if info.Error != nil {
m.Error = info.Error.Err m.Error = info.Error.Err
continue continue
} }
if m.Reuse {
continue
}
sem <- token{} sem <- token{}
go func() { go func() {
downloadModule(m) downloadModule(m)
@ -252,8 +283,19 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
// 'go get mod@version', which may have other side effects. We print this in // 'go get mod@version', which may have other side effects. We print this in
// some error message hints. // some error message hints.
// //
// Don't save sums for 'go mod download' without arguments; see comment above. // If we're in workspace mode, update go.work.sum with checksums for all of
if haveExplicitArgs { // the modules we downloaded that aren't already recorded. Since a requirement
// in one module may upgrade a dependency of another, we can't be sure that
// the import graph matches the import graph of any given module in isolation,
// so we may end up needing to load packages from modules that wouldn't
// otherwise be relevant.
//
// TODO(#44435): If we adjust the set of modules downloaded in workspace mode,
// we may also need to adjust the logic for saving checksums here.
//
// Don't save sums for 'go mod download' without arguments unless we're in
// workspace mode; see comment above.
if haveExplicitArgs || modload.WorkFilePath() != "" {
if err := modload.WriteGoMod(ctx); err != nil { if err := modload.WriteGoMod(ctx); err != nil {
base.Errorf("go: %v", err) base.Errorf("go: %v", err)
} }

View file

@ -82,7 +82,7 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) {
} }
} }
mods, err := modload.ListModules(ctx, args, 0) mods, err := modload.ListModules(ctx, args, 0, "")
if err != nil { if err != nil {
base.Fatalf("go: %v", err) base.Fatalf("go: %v", err)
} }

View file

@ -164,7 +164,7 @@ func SideLock() (unlock func(), err error) {
} }
// A cachingRepo is a cache around an underlying Repo, // A cachingRepo is a cache around an underlying Repo,
// avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not Zip). // avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not CheckReuse or Zip).
// It is also safe for simultaneous use by multiple goroutines // It is also safe for simultaneous use by multiple goroutines
// (so that it can be returned from Lookup multiple times). // (so that it can be returned from Lookup multiple times).
// It serializes calls to the underlying Repo. // It serializes calls to the underlying Repo.
@ -195,24 +195,32 @@ func (r *cachingRepo) repo() Repo {
return r.r return r.r
} }
func (r *cachingRepo) CheckReuse(old *codehost.Origin) error {
return r.repo().CheckReuse(old)
}
func (r *cachingRepo) ModulePath() string { func (r *cachingRepo) ModulePath() string {
return r.path return r.path
} }
func (r *cachingRepo) Versions(prefix string) ([]string, error) { func (r *cachingRepo) Versions(prefix string) (*Versions, error) {
type cached struct { type cached struct {
list []string v *Versions
err error err error
} }
c := r.cache.Do("versions:"+prefix, func() any { c := r.cache.Do("versions:"+prefix, func() any {
list, err := r.repo().Versions(prefix) v, err := r.repo().Versions(prefix)
return cached{list, err} return cached{v, err}
}).(cached) }).(cached)
if c.err != nil { if c.err != nil {
return nil, c.err return nil, c.err
} }
return append([]string(nil), c.list...), nil v := &Versions{
Origin: c.v.Origin,
List: append([]string(nil), c.v.List...),
}
return v, nil
} }
type cachedInfo struct { type cachedInfo struct {
@ -245,11 +253,12 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
return cachedInfo{info, err} return cachedInfo{info, err}
}).(cachedInfo) }).(cachedInfo)
if c.err != nil { info := c.info
return nil, c.err if info != nil {
copy := *info
info = &copy
} }
info := *c.info return info, c.err
return &info, nil
} }
func (r *cachingRepo) Latest() (*RevInfo, error) { func (r *cachingRepo) Latest() (*RevInfo, error) {
@ -269,11 +278,12 @@ func (r *cachingRepo) Latest() (*RevInfo, error) {
return cachedInfo{info, err} return cachedInfo{info, err}
}).(cachedInfo) }).(cachedInfo)
if c.err != nil { info := c.info
return nil, c.err if info != nil {
copy := *info
info = &copy
} }
info := *c.info return info, c.err
return &info, nil
} }
func (r *cachingRepo) GoMod(version string) ([]byte, error) { func (r *cachingRepo) GoMod(version string) ([]byte, error) {
@ -310,31 +320,41 @@ func (r *cachingRepo) Zip(dst io.Writer, version string) error {
return r.repo().Zip(dst, version) return r.repo().Zip(dst, version)
} }
// InfoFile is like Lookup(path).Stat(version) but returns the name of the file // InfoFile is like Lookup(path).Stat(version) but also returns the name of the file
// containing the cached information. // containing the cached information.
func InfoFile(path, version string) (string, error) { func InfoFile(path, version string) (*RevInfo, string, error) {
if !semver.IsValid(version) { if !semver.IsValid(version) {
return "", fmt.Errorf("invalid version %q", version) return nil, "", fmt.Errorf("invalid version %q", version)
} }
if file, _, err := readDiskStat(path, version); err == nil { if file, info, err := readDiskStat(path, version); err == nil {
return file, nil return info, file, nil
} }
var info *RevInfo
var err2info map[error]*RevInfo
err := TryProxies(func(proxy string) error { err := TryProxies(func(proxy string) error {
_, err := Lookup(proxy, path).Stat(version) i, err := Lookup(proxy, path).Stat(version)
if err == nil {
info = i
} else {
if err2info == nil {
err2info = make(map[error]*RevInfo)
}
err2info[err] = info
}
return err return err
}) })
if err != nil { if err != nil {
return "", err return err2info[err], "", err
} }
// Stat should have populated the disk cache for us. // Stat should have populated the disk cache for us.
file, err := CachePath(module.Version{Path: path, Version: version}, "info") file, err := CachePath(module.Version{Path: path, Version: version}, "info")
if err != nil { if err != nil {
return "", err return nil, "", err
} }
return file, nil return info, file, nil
} }
// GoMod is like Lookup(path).GoMod(rev) but avoids the // GoMod is like Lookup(path).GoMod(rev) but avoids the
@ -561,6 +581,26 @@ func writeDiskStat(file string, info *RevInfo) error {
if file == "" { if file == "" {
return nil return nil
} }
if info.Origin != nil {
// Clean the origin information, which might have too many
// validation criteria, for example if we are saving the result of
// m@master as m@pseudo-version.
clean := *info
info = &clean
o := *info.Origin
info.Origin = &o
// Tags never matter if you are starting with a semver version,
// as we would be when finding this cache entry.
o.TagSum = ""
o.TagPrefix = ""
// Ref doesn't matter if you have a pseudoversion.
if module.IsPseudoVersion(info.Version) {
o.Ref = ""
}
}
js, err := json.Marshal(info) js, err := json.Marshal(info)
if err != nil { if err != nil {
return err return err

View file

@ -22,6 +22,9 @@ import (
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/lockedfile" "cmd/go/internal/lockedfile"
"cmd/go/internal/str" "cmd/go/internal/str"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
) )
// Downloaded size limits. // Downloaded size limits.
@ -36,8 +39,15 @@ const (
// remote version control servers, and code hosting sites. // remote version control servers, and code hosting sites.
// A Repo must be safe for simultaneous use by multiple goroutines. // A Repo must be safe for simultaneous use by multiple goroutines.
type Repo interface { type Repo interface {
// CheckReuse checks whether the old origin information
// remains up to date. If so, whatever cached object it was
// taken from can be reused.
// The subdir gives subdirectory name where the module root is expected to be found,
// "" for the root or "sub/dir" for a subdirectory (no trailing slash).
CheckReuse(old *Origin, subdir string) error
// List lists all tags with the given prefix. // List lists all tags with the given prefix.
Tags(prefix string) (tags []string, err error) Tags(prefix string) (*Tags, error)
// Stat returns information about the revision rev. // Stat returns information about the revision rev.
// A revision can be any identifier known to the underlying service: // A revision can be any identifier known to the underlying service:
@ -74,8 +84,88 @@ type Repo interface {
DescendsFrom(rev, tag string) (bool, error) DescendsFrom(rev, tag string) (bool, error)
} }
// A Rev describes a single revision in a source code repository. // An Origin describes the provenance of a given repo method result.
// It can be passed to CheckReuse (usually in a different go command invocation)
// to see whether the result remains up-to-date.
type Origin struct {
VCS string `json:",omitempty"` // "git" etc
URL string `json:",omitempty"` // URL of repository
Subdir string `json:",omitempty"` // subdirectory in repo
// If TagSum is non-empty, then the resolution of this module version
// depends on the set of tags present in the repo, specifically the tags
// of the form TagPrefix + a valid semver version.
// If the matching repo tags and their commit hashes still hash to TagSum,
// the Origin is still valid (at least as far as the tags are concerned).
// The exact checksum is up to the Repo implementation; see (*gitRepo).Tags.
TagPrefix string `json:",omitempty"`
TagSum string `json:",omitempty"`
// If Ref is non-empty, then the resolution of this module version
// depends on Ref resolving to the revision identified by Hash.
// If Ref still resolves to Hash, the Origin is still valid (at least as far as Ref is concerned).
// For Git, the Ref is a full ref like "refs/heads/main" or "refs/tags/v1.2.3",
// and the Hash is the Git object hash the ref maps to.
// Other VCS might choose differently, but the idea is that Ref is the name
// with a mutable meaning while Hash is a name with an immutable meaning.
Ref string `json:",omitempty"`
Hash string `json:",omitempty"`
// If RepoSum is non-empty, then the resolution of this module version
// failed due to the repo being available but the version not being present.
// This depends on the entire state of the repo, which RepoSum summarizes.
// For Git, this is a hash of all the refs and their hashes.
RepoSum string `json:",omitempty"`
}
// Checkable reports whether the Origin contains anything that can be checked.
// If not, the Origin is purely informational and should fail a CheckReuse call.
func (o *Origin) Checkable() bool {
return o.TagSum != "" || o.Ref != "" || o.Hash != "" || o.RepoSum != ""
}
// ClearCheckable clears the Origin enough to make Checkable return false.
func (o *Origin) ClearCheckable() {
o.TagSum = ""
o.TagPrefix = ""
o.Ref = ""
o.Hash = ""
o.RepoSum = ""
}
// A Tags describes the available tags in a code repository.
type Tags struct {
Origin *Origin
List []Tag
}
// A Tag describes a single tag in a code repository.
type Tag struct {
Name string
Hash string // content hash identifying tag's content, if available
}
// isOriginTag reports whether tag should be preserved
// in the Tags method's Origin calculation.
// We can safely ignore tags that are not look like pseudo-versions,
// because ../coderepo.go's (*codeRepo).Versions ignores them too.
// We can also ignore non-semver tags, but we have to include semver
// tags with extra suffixes, because the pseudo-version base finder uses them.
func isOriginTag(tag string) bool {
// modfetch.(*codeRepo).Versions uses Canonical == tag,
// but pseudo-version calculation has a weaker condition that
// the canonical is a prefix of the tag.
// Include those too, so that if any new one appears, we'll invalidate the cache entry.
// This will lead to spurious invalidation of version list results,
// but tags of this form being created should be fairly rare
// (and invalidate pseudo-version results anyway).
c := semver.Canonical(tag)
return c != "" && strings.HasPrefix(tag, c) && !module.IsPseudoVersion(tag)
}
// A RevInfo describes a single revision in a source code repository.
type RevInfo struct { type RevInfo struct {
Origin *Origin
Name string // complete ID in underlying repository Name string // complete ID in underlying repository
Short string // shortened ID, for use in pseudo-version Short string // shortened ID, for use in pseudo-version
Version string // version used in lookup Version string // version used in lookup
@ -157,7 +247,7 @@ func WorkDir(typ, name string) (dir, lockfile string, err error) {
lockfile = dir + ".lock" lockfile = dir + ".lock"
if cfg.BuildX { if cfg.BuildX {
fmt.Fprintf(os.Stderr, "# lock %s", lockfile) fmt.Fprintf(os.Stderr, "# lock %s\n", lockfile)
} }
unlock, err := lockedfile.MutexAt(lockfile).Lock() unlock, err := lockedfile.MutexAt(lockfile).Lock()

View file

@ -6,6 +6,8 @@ package codehost
import ( import (
"bytes" "bytes"
"crypto/sha256"
"encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -169,6 +171,57 @@ func (r *gitRepo) loadLocalTags() {
} }
} }
func (r *gitRepo) CheckReuse(old *Origin, subdir string) error {
if old == nil {
return fmt.Errorf("missing origin")
}
if old.VCS != "git" || old.URL != r.remoteURL {
return fmt.Errorf("origin moved from %v %q to %v %q", old.VCS, old.URL, "git", r.remoteURL)
}
if old.Subdir != subdir {
return fmt.Errorf("origin moved from %v %q %q to %v %q %q", old.VCS, old.URL, old.Subdir, "git", r.remoteURL, subdir)
}
// Note: Can have Hash with no Ref and no TagSum and no RepoSum,
// meaning the Hash simply has to remain in the repo.
// In that case we assume it does in the absence of any real way to check.
// But if neither Hash nor TagSum is present, we have nothing to check,
// which we take to mean we didn't record enough information to be sure.
if old.Hash == "" && old.TagSum == "" && old.RepoSum == "" {
return fmt.Errorf("non-specific origin")
}
r.loadRefs()
if r.refsErr != nil {
return r.refsErr
}
if old.Ref != "" {
hash, ok := r.refs[old.Ref]
if !ok {
return fmt.Errorf("ref %q deleted", old.Ref)
}
if hash != old.Hash {
return fmt.Errorf("ref %q moved from %s to %s", old.Ref, old.Hash, hash)
}
}
if old.TagSum != "" {
tags, err := r.Tags(old.TagPrefix)
if err != nil {
return err
}
if tags.Origin.TagSum != old.TagSum {
return fmt.Errorf("tags changed")
}
}
if old.RepoSum != "" {
if r.repoSum(r.refs) != old.RepoSum {
return fmt.Errorf("refs changed")
}
}
return nil
}
// loadRefs loads heads and tags references from the remote into the map r.refs. // loadRefs loads heads and tags references from the remote into the map r.refs.
// The result is cached in memory. // The result is cached in memory.
func (r *gitRepo) loadRefs() (map[string]string, error) { func (r *gitRepo) loadRefs() (map[string]string, error) {
@ -219,14 +272,21 @@ func (r *gitRepo) loadRefs() (map[string]string, error) {
return r.refs, r.refsErr return r.refs, r.refsErr
} }
func (r *gitRepo) Tags(prefix string) ([]string, error) { func (r *gitRepo) Tags(prefix string) (*Tags, error) {
refs, err := r.loadRefs() refs, err := r.loadRefs()
if err != nil { if err != nil {
return nil, err return nil, err
} }
tags := []string{} tags := &Tags{
for ref := range refs { Origin: &Origin{
VCS: "git",
URL: r.remoteURL,
TagPrefix: prefix,
},
List: []Tag{},
}
for ref, hash := range refs {
if !strings.HasPrefix(ref, "refs/tags/") { if !strings.HasPrefix(ref, "refs/tags/") {
continue continue
} }
@ -234,12 +294,52 @@ func (r *gitRepo) Tags(prefix string) ([]string, error) {
if !strings.HasPrefix(tag, prefix) { if !strings.HasPrefix(tag, prefix) {
continue continue
} }
tags = append(tags, tag) tags.List = append(tags.List, Tag{tag, hash})
} }
sort.Strings(tags) sort.Slice(tags.List, func(i, j int) bool {
return tags.List[i].Name < tags.List[j].Name
})
dir := prefix[:strings.LastIndex(prefix, "/")+1]
h := sha256.New()
for _, tag := range tags.List {
if isOriginTag(strings.TrimPrefix(tag.Name, dir)) {
fmt.Fprintf(h, "%q %s\n", tag.Name, tag.Hash)
}
}
tags.Origin.TagSum = "t1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
return tags, nil return tags, nil
} }
// repoSum returns a checksum of the entire repo state,
// which can be checked (as Origin.RepoSum) to cache
// the absence of a specific module version.
// The caller must supply refs, the result of a successful r.loadRefs.
func (r *gitRepo) repoSum(refs map[string]string) string {
var list []string
for ref := range refs {
list = append(list, ref)
}
sort.Strings(list)
h := sha256.New()
for _, ref := range list {
fmt.Fprintf(h, "%q %s\n", ref, refs[ref])
}
return "r1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
}
// unknownRevisionInfo returns a RevInfo containing an Origin containing a RepoSum of refs,
// for use when returning an UnknownRevisionError.
func (r *gitRepo) unknownRevisionInfo(refs map[string]string) *RevInfo {
return &RevInfo{
Origin: &Origin{
VCS: "git",
URL: r.remoteURL,
RepoSum: r.repoSum(refs),
},
}
}
func (r *gitRepo) Latest() (*RevInfo, error) { func (r *gitRepo) Latest() (*RevInfo, error) {
refs, err := r.loadRefs() refs, err := r.loadRefs()
if err != nil { if err != nil {
@ -248,7 +348,13 @@ func (r *gitRepo) Latest() (*RevInfo, error) {
if refs["HEAD"] == "" { if refs["HEAD"] == "" {
return nil, ErrNoCommits return nil, ErrNoCommits
} }
return r.Stat(refs["HEAD"]) info, err := r.Stat(refs["HEAD"])
if err != nil {
return nil, err
}
info.Origin.Ref = "HEAD"
info.Origin.Hash = refs["HEAD"]
return info, nil
} }
// findRef finds some ref name for the given hash, // findRef finds some ref name for the given hash,
@ -278,7 +384,7 @@ const minHashDigits = 7
// stat stats the given rev in the local repository, // stat stats the given rev in the local repository,
// or else it fetches more info from the remote repository and tries again. // or else it fetches more info from the remote repository and tries again.
func (r *gitRepo) stat(rev string) (*RevInfo, error) { func (r *gitRepo) stat(rev string) (info *RevInfo, err error) {
if r.local { if r.local {
return r.statLocal(rev, rev) return r.statLocal(rev, rev)
} }
@ -345,9 +451,19 @@ func (r *gitRepo) stat(rev string) (*RevInfo, error) {
hash = rev hash = rev
} }
} else { } else {
return nil, &UnknownRevisionError{Rev: rev} return r.unknownRevisionInfo(refs), &UnknownRevisionError{Rev: rev}
} }
defer func() {
if info != nil {
info.Origin.Hash = info.Name
// There's a ref = hash below; don't write that hash down as Origin.Ref.
if ref != info.Origin.Hash {
info.Origin.Ref = ref
}
}
}()
// Protect r.fetchLevel and the "fetch more and more" sequence. // Protect r.fetchLevel and the "fetch more and more" sequence.
unlock, err := r.mu.Lock() unlock, err := r.mu.Lock()
if err != nil { if err != nil {
@ -449,7 +565,12 @@ func (r *gitRepo) fetchRefsLocked() error {
func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) { func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) {
out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--") out, err := Run(r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--")
if err != nil { if err != nil {
return nil, &UnknownRevisionError{Rev: rev} // Return info with Origin.RepoSum if possible to allow caching of negative lookup.
var info *RevInfo
if refs, err := r.loadRefs(); err == nil {
info = r.unknownRevisionInfo(refs)
}
return info, &UnknownRevisionError{Rev: rev}
} }
f := strings.Fields(string(out)) f := strings.Fields(string(out))
if len(f) < 2 { if len(f) < 2 {
@ -465,11 +586,19 @@ func (r *gitRepo) statLocal(version, rev string) (*RevInfo, error) {
} }
info := &RevInfo{ info := &RevInfo{
Origin: &Origin{
VCS: "git",
URL: r.remoteURL,
Hash: hash,
},
Name: hash, Name: hash,
Short: ShortenSHA1(hash), Short: ShortenSHA1(hash),
Time: time.Unix(t, 0).UTC(), Time: time.Unix(t, 0).UTC(),
Version: hash, Version: hash,
} }
if !strings.HasPrefix(hash, rev) {
info.Origin.Ref = rev
}
// Add tags. Output looks like: // Add tags. Output looks like:
// ede458df7cd0fdca520df19a33158086a8a68e81 1523994202 HEAD -> master, tag: v1.2.4-annotated, tag: v1.2.3, origin/master, origin/HEAD // ede458df7cd0fdca520df19a33158086a8a68e81 1523994202 HEAD -> master, tag: v1.2.4-annotated, tag: v1.2.3, origin/master, origin/HEAD
@ -580,7 +709,7 @@ func (r *gitRepo) RecentTag(rev, prefix string, allowed func(tag string) bool) (
if err != nil { if err != nil {
return "", err return "", err
} }
if len(tags) == 0 { if len(tags.List) == 0 {
return "", nil return "", nil
} }
@ -634,7 +763,7 @@ func (r *gitRepo) DescendsFrom(rev, tag string) (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
if len(tags) == 0 { if len(tags.List) == 0 {
return false, nil return false, nil
} }

View file

@ -43,7 +43,7 @@ var altRepos = []string{
// For now, at least the hgrepo1 tests check the general vcs.go logic. // For now, at least the hgrepo1 tests check the general vcs.go logic.
// localGitRepo is like gitrepo1 but allows archive access. // localGitRepo is like gitrepo1 but allows archive access.
var localGitRepo string var localGitRepo, localGitURL string
func testMain(m *testing.M) int { func testMain(m *testing.M) int {
dir, err := os.MkdirTemp("", "gitrepo-test-") dir, err := os.MkdirTemp("", "gitrepo-test-")
@ -65,6 +65,15 @@ func testMain(m *testing.M) int {
if _, err := Run(localGitRepo, "git", "config", "daemon.uploadarch", "true"); err != nil { if _, err := Run(localGitRepo, "git", "config", "daemon.uploadarch", "true"); err != nil {
log.Fatal(err) log.Fatal(err)
} }
// Convert absolute path to file URL. LocalGitRepo will not accept
// Windows absolute paths because they look like a host:path remote.
// TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
if strings.HasPrefix(localGitRepo, "/") {
localGitURL = "file://" + localGitRepo
} else {
localGitURL = "file:///" + filepath.ToSlash(localGitRepo)
}
} }
} }
@ -73,17 +82,8 @@ func testMain(m *testing.M) int {
func testRepo(t *testing.T, remote string) (Repo, error) { func testRepo(t *testing.T, remote string) (Repo, error) {
if remote == "localGitRepo" { if remote == "localGitRepo" {
// Convert absolute path to file URL. LocalGitRepo will not accept
// Windows absolute paths because they look like a host:path remote.
// TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
var url string
if strings.HasPrefix(localGitRepo, "/") {
url = "file://" + localGitRepo
} else {
url = "file:///" + filepath.ToSlash(localGitRepo)
}
testenv.MustHaveExecPath(t, "git") testenv.MustHaveExecPath(t, "git")
return LocalGitRepo(url) return LocalGitRepo(localGitURL)
} }
vcs := "git" vcs := "git"
for _, k := range []string{"hg"} { for _, k := range []string{"hg"} {
@ -98,13 +98,28 @@ func testRepo(t *testing.T, remote string) (Repo, error) {
var tagsTests = []struct { var tagsTests = []struct {
repo string repo string
prefix string prefix string
tags []string tags []Tag
}{ }{
{gitrepo1, "xxx", []string{}}, {gitrepo1, "xxx", []Tag{}},
{gitrepo1, "", []string{"v1.2.3", "v1.2.4-annotated", "v2.0.1", "v2.0.2", "v2.3"}}, {gitrepo1, "", []Tag{
{gitrepo1, "v", []string{"v1.2.3", "v1.2.4-annotated", "v2.0.1", "v2.0.2", "v2.3"}}, {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
{gitrepo1, "v1", []string{"v1.2.3", "v1.2.4-annotated"}}, {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
{gitrepo1, "2", []string{}}, {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
{"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"},
{"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
}},
{gitrepo1, "v", []Tag{
{"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
{"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
{"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
{"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"},
{"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"},
}},
{gitrepo1, "v1", []Tag{
{"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"},
{"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"},
}},
{gitrepo1, "2", []Tag{}},
} }
func TestTags(t *testing.T) { func TestTags(t *testing.T) {
@ -121,13 +136,24 @@ func TestTags(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(tags, tt.tags) { if tags == nil || !reflect.DeepEqual(tags.List, tt.tags) {
t.Errorf("Tags: incorrect tags\nhave %v\nwant %v", tags, tt.tags) t.Errorf("Tags(%q): incorrect tags\nhave %v\nwant %v", tt.prefix, tags, tt.tags)
} }
} }
t.Run(path.Base(tt.repo)+"/"+tt.prefix, f) t.Run(path.Base(tt.repo)+"/"+tt.prefix, f)
if tt.repo == gitrepo1 { if tt.repo == gitrepo1 {
// Clear hashes.
clearTags := []Tag{}
for _, tag := range tt.tags {
clearTags = append(clearTags, Tag{tag.Name, ""})
}
tags := tt.tags
for _, tt.repo = range altRepos { for _, tt.repo = range altRepos {
if strings.Contains(tt.repo, "Git") {
tt.tags = tags
} else {
tt.tags = clearTags
}
t.Run(path.Base(tt.repo)+"/"+tt.prefix, f) t.Run(path.Base(tt.repo)+"/"+tt.prefix, f)
} }
} }
@ -141,6 +167,12 @@ var latestTests = []struct {
{ {
gitrepo1, gitrepo1,
&RevInfo{ &RevInfo{
Origin: &Origin{
VCS: "git",
URL: "https://vcs-test.golang.org/git/gitrepo1",
Ref: "HEAD",
Hash: "ede458df7cd0fdca520df19a33158086a8a68e81",
},
Name: "ede458df7cd0fdca520df19a33158086a8a68e81", Name: "ede458df7cd0fdca520df19a33158086a8a68e81",
Short: "ede458df7cd0", Short: "ede458df7cd0",
Version: "ede458df7cd0fdca520df19a33158086a8a68e81", Version: "ede458df7cd0fdca520df19a33158086a8a68e81",
@ -151,6 +183,11 @@ var latestTests = []struct {
{ {
hgrepo1, hgrepo1,
&RevInfo{ &RevInfo{
Origin: &Origin{
VCS: "hg",
URL: "https://vcs-test.golang.org/hg/hgrepo1",
Hash: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
},
Name: "18518c07eb8ed5c80221e997e518cccaa8c0c287", Name: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
Short: "18518c07eb8e", Short: "18518c07eb8e",
Version: "18518c07eb8ed5c80221e997e518cccaa8c0c287", Version: "18518c07eb8ed5c80221e997e518cccaa8c0c287",
@ -174,12 +211,17 @@ func TestLatest(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(info, tt.info) { if !reflect.DeepEqual(info, tt.info) {
t.Errorf("Latest: incorrect info\nhave %+v\nwant %+v", *info, *tt.info) t.Errorf("Latest: incorrect info\nhave %+v (origin %+v)\nwant %+v (origin %+v)", info, info.Origin, tt.info, tt.info.Origin)
} }
} }
t.Run(path.Base(tt.repo), f) t.Run(path.Base(tt.repo), f)
if tt.repo == gitrepo1 { if tt.repo == gitrepo1 {
tt.repo = "localGitRepo" tt.repo = "localGitRepo"
info := *tt.info
tt.info = &info
o := *info.Origin
info.Origin = &o
o.URL = localGitURL
t.Run(path.Base(tt.repo), f) t.Run(path.Base(tt.repo), f)
} }
} }
@ -590,11 +632,12 @@ func TestStat(t *testing.T) {
if !strings.Contains(err.Error(), tt.err) { if !strings.Contains(err.Error(), tt.err) {
t.Fatalf("Stat: wrong error %q, want %q", err, tt.err) t.Fatalf("Stat: wrong error %q, want %q", err, tt.err)
} }
if info != nil { if info != nil && info.Origin == nil {
t.Errorf("Stat: non-nil info with error %q", err) t.Errorf("Stat: non-nil info with nil Origin with error %q", err)
} }
return return
} }
info.Origin = nil // TestLatest and ../../../testdata/script/reuse_git.txt test Origin well enough
if !reflect.DeepEqual(info, tt.info) { if !reflect.DeepEqual(info, tt.info) {
t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info) t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info)
} }

View file

@ -290,7 +290,13 @@ func (r *vcsRepo) loadBranches() {
} }
} }
func (r *vcsRepo) Tags(prefix string) ([]string, error) { var ErrNoRepoHash = errors.New("RepoHash not supported")
func (r *vcsRepo) CheckReuse(old *Origin, subdir string) error {
return fmt.Errorf("vcs %s does not implement CheckReuse", r.cmd.vcs)
}
func (r *vcsRepo) Tags(prefix string) (*Tags, error) {
unlock, err := r.mu.Lock() unlock, err := r.mu.Lock()
if err != nil { if err != nil {
return nil, err return nil, err
@ -298,14 +304,24 @@ func (r *vcsRepo) Tags(prefix string) ([]string, error) {
defer unlock() defer unlock()
r.tagsOnce.Do(r.loadTags) r.tagsOnce.Do(r.loadTags)
tags := &Tags{
tags := []string{} // None of the other VCS provide a reasonable way to compute TagSum
// without downloading the whole repo, so we only include VCS and URL
// in the Origin.
Origin: &Origin{
VCS: r.cmd.vcs,
URL: r.remote,
},
List: []Tag{},
}
for tag := range r.tags { for tag := range r.tags {
if strings.HasPrefix(tag, prefix) { if strings.HasPrefix(tag, prefix) {
tags = append(tags, tag) tags.List = append(tags.List, Tag{tag, ""})
} }
} }
sort.Strings(tags) sort.Slice(tags.List, func(i, j int) bool {
return tags.List[i].Name < tags.List[j].Name
})
return tags, nil return tags, nil
} }
@ -352,7 +368,16 @@ func (r *vcsRepo) statLocal(rev string) (*RevInfo, error) {
if err != nil { if err != nil {
return nil, &UnknownRevisionError{Rev: rev} return nil, &UnknownRevisionError{Rev: rev}
} }
return r.cmd.parseStat(rev, string(out)) info, err := r.cmd.parseStat(rev, string(out))
if err != nil {
return nil, err
}
if info.Origin == nil {
info.Origin = new(Origin)
}
info.Origin.VCS = r.cmd.vcs
info.Origin.URL = r.remote
return info, nil
} }
func (r *vcsRepo) Latest() (*RevInfo, error) { func (r *vcsRepo) Latest() (*RevInfo, error) {
@ -491,6 +516,9 @@ func hgParseStat(rev, out string) (*RevInfo, error) {
sort.Strings(tags) sort.Strings(tags)
info := &RevInfo{ info := &RevInfo{
Origin: &Origin{
Hash: hash,
},
Name: hash, Name: hash,
Short: ShortenSHA1(hash), Short: ShortenSHA1(hash),
Time: time.Unix(t, 0).UTC(), Time: time.Unix(t, 0).UTC(),
@ -569,6 +597,9 @@ func fossilParseStat(rev, out string) (*RevInfo, error) {
version = hash // extend to full hash version = hash // extend to full hash
} }
info := &RevInfo{ info := &RevInfo{
Origin: &Origin{
Hash: hash,
},
Name: hash, Name: hash,
Short: ShortenSHA1(hash), Short: ShortenSHA1(hash),
Time: t, Time: t,

View file

@ -130,12 +130,16 @@ func (r *codeRepo) ModulePath() string {
return r.modPath return r.modPath
} }
func (r *codeRepo) Versions(prefix string) ([]string, error) { func (r *codeRepo) CheckReuse(old *codehost.Origin) error {
return r.code.CheckReuse(old, r.codeDir)
}
func (r *codeRepo) Versions(prefix string) (*Versions, error) {
// Special case: gopkg.in/macaroon-bakery.v2-unstable // Special case: gopkg.in/macaroon-bakery.v2-unstable
// does not use the v2 tags (those are for macaroon-bakery.v2). // does not use the v2 tags (those are for macaroon-bakery.v2).
// It has no possible tags at all. // It has no possible tags at all.
if strings.HasPrefix(r.modPath, "gopkg.in/") && strings.HasSuffix(r.modPath, "-unstable") { if strings.HasPrefix(r.modPath, "gopkg.in/") && strings.HasSuffix(r.modPath, "-unstable") {
return nil, nil return &Versions{}, nil
} }
p := prefix p := prefix
@ -149,16 +153,21 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
Err: err, Err: err,
} }
} }
if tags.Origin != nil {
tags.Origin.Subdir = r.codeDir
}
var list, incompatible []string var list, incompatible []string
for _, tag := range tags { for _, tag := range tags.List {
if !strings.HasPrefix(tag, p) { if !strings.HasPrefix(tag.Name, p) {
continue continue
} }
v := tag v := tag.Name
if r.codeDir != "" { if r.codeDir != "" {
v = v[len(r.codeDir)+1:] v = v[len(r.codeDir)+1:]
} }
// Note: ./codehost/codehost.go's isOriginTag knows about these conditions too.
// If these are relaxed, isOriginTag will need to be relaxed as well.
if v == "" || v != semver.Canonical(v) { if v == "" || v != semver.Canonical(v) {
// Ignore non-canonical tags: Stat rewrites those to canonical // Ignore non-canonical tags: Stat rewrites those to canonical
// pseudo-versions. Note that we compare against semver.Canonical here // pseudo-versions. Note that we compare against semver.Canonical here
@ -186,7 +195,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
semver.Sort(list) semver.Sort(list)
semver.Sort(incompatible) semver.Sort(incompatible)
return r.appendIncompatibleVersions(list, incompatible) return r.appendIncompatibleVersions(tags.Origin, list, incompatible)
} }
// appendIncompatibleVersions appends "+incompatible" versions to list if // appendIncompatibleVersions appends "+incompatible" versions to list if
@ -196,10 +205,14 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
// prefix. // prefix.
// //
// Both list and incompatible must be sorted in semantic order. // Both list and incompatible must be sorted in semantic order.
func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]string, error) { func (r *codeRepo) appendIncompatibleVersions(origin *codehost.Origin, list, incompatible []string) (*Versions, error) {
versions := &Versions{
Origin: origin,
List: list,
}
if len(incompatible) == 0 || r.pathMajor != "" { if len(incompatible) == 0 || r.pathMajor != "" {
// No +incompatible versions are possible, so no need to check them. // No +incompatible versions are possible, so no need to check them.
return list, nil return versions, nil
} }
versionHasGoMod := func(v string) (bool, error) { versionHasGoMod := func(v string) (bool, error) {
@ -232,7 +245,7 @@ func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]st
// (github.com/russross/blackfriday@v2.0.0 and // (github.com/russross/blackfriday@v2.0.0 and
// github.com/libp2p/go-libp2p@v6.0.23), and (as of 2019-10-29) have no // github.com/libp2p/go-libp2p@v6.0.23), and (as of 2019-10-29) have no
// concrete examples for which it is undesired. // concrete examples for which it is undesired.
return list, nil return versions, nil
} }
} }
@ -271,10 +284,10 @@ func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]st
// bounds. // bounds.
continue continue
} }
list = append(list, v+"+incompatible") versions.List = append(versions.List, v+"+incompatible")
} }
return list, nil return versions, nil
} }
func (r *codeRepo) Stat(rev string) (*RevInfo, error) { func (r *codeRepo) Stat(rev string) (*RevInfo, error) {
@ -284,7 +297,15 @@ func (r *codeRepo) Stat(rev string) (*RevInfo, error) {
codeRev := r.revToRev(rev) codeRev := r.revToRev(rev)
info, err := r.code.Stat(codeRev) info, err := r.code.Stat(codeRev)
if err != nil { if err != nil {
return nil, &module.ModuleError{ // Note: info may be non-nil to supply Origin for caching error.
var revInfo *RevInfo
if info != nil {
revInfo = &RevInfo{
Origin: info.Origin,
Version: rev,
}
}
return revInfo, &module.ModuleError{
Path: r.modPath, Path: r.modPath,
Err: &module.InvalidVersionError{ Err: &module.InvalidVersionError{
Version: rev, Version: rev,
@ -439,7 +460,31 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
return nil, errIncompatible return nil, errIncompatible
} }
origin := info.Origin
if origin != nil {
o := *origin
origin = &o
origin.Subdir = r.codeDir
if module.IsPseudoVersion(v) && (v != statVers || !strings.HasPrefix(v, "v0.0.0-")) {
// Add tags that are relevant to pseudo-version calculation to origin.
prefix := r.codeDir
if prefix != "" {
prefix += "/"
}
if r.pathMajor != "" { // "/v2" or "/.v2"
prefix += r.pathMajor[1:] + "." // += "v2."
}
tags, err := r.code.Tags(prefix)
if err != nil {
return nil, err
}
origin.TagPrefix = tags.Origin.TagPrefix
origin.TagSum = tags.Origin.TagSum
}
}
return &RevInfo{ return &RevInfo{
Origin: origin,
Name: info.Name, Name: info.Name,
Short: info.Short, Short: info.Short,
Time: info.Time, Time: info.Time,
@ -674,11 +719,11 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string)
var lastTag string // Prefer to log some real tag rather than a canonically-equivalent base. var lastTag string // Prefer to log some real tag rather than a canonically-equivalent base.
ancestorFound := false ancestorFound := false
for _, tag := range tags { for _, tag := range tags.List {
versionOnly := strings.TrimPrefix(tag, tagPrefix) versionOnly := strings.TrimPrefix(tag.Name, tagPrefix)
if semver.Compare(versionOnly, base) == 0 { if semver.Compare(versionOnly, base) == 0 {
lastTag = tag lastTag = tag.Name
ancestorFound, err = r.code.DescendsFrom(info.Name, tag) ancestorFound, err = r.code.DescendsFrom(info.Name, tag.Name)
if ancestorFound { if ancestorFound {
break break
} }
@ -747,7 +792,7 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
file1 := path.Join(r.codeDir, "go.mod") file1 := path.Join(r.codeDir, "go.mod")
gomod1, err1 := r.code.ReadFile(rev, file1, codehost.MaxGoMod) gomod1, err1 := r.code.ReadFile(rev, file1, codehost.MaxGoMod)
if err1 != nil && !os.IsNotExist(err1) { if err1 != nil && !os.IsNotExist(err1) {
return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file1, rev, err1) return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file1, rev, err1)
} }
mpath1 := modfile.ModulePath(gomod1) mpath1 := modfile.ModulePath(gomod1)
found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1)) found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1))
@ -765,7 +810,7 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
file2 = path.Join(dir2, "go.mod") file2 = path.Join(dir2, "go.mod")
gomod2, err2 := r.code.ReadFile(rev, file2, codehost.MaxGoMod) gomod2, err2 := r.code.ReadFile(rev, file2, codehost.MaxGoMod)
if err2 != nil && !os.IsNotExist(err2) { if err2 != nil && !os.IsNotExist(err2) {
return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file2, rev, err2) return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file2, rev, err2)
} }
mpath2 := modfile.ModulePath(gomod2) mpath2 := modfile.ModulePath(gomod2)
found2 := err2 == nil && isMajor(mpath2, r.pathMajor) found2 := err2 == nil && isMajor(mpath2, r.pathMajor)
@ -778,9 +823,9 @@ func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err e
} }
if err2 == nil { if err2 == nil {
if mpath2 == "" { if mpath2 == "" {
return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.pathPrefix, file2, rev) return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.codeRoot, file2, rev)
} }
return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.pathPrefix, file2, r.pathMajor, mpath2, rev) return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.codeRoot, file2, r.pathMajor, mpath2, rev)
} }
} }
@ -922,10 +967,11 @@ func (r *codeRepo) modPrefix(rev string) string {
} }
func (r *codeRepo) retractedVersions() (func(string) bool, error) { func (r *codeRepo) retractedVersions() (func(string) bool, error) {
versions, err := r.Versions("") vs, err := r.Versions("")
if err != nil { if err != nil {
return nil, err return nil, err
} }
versions := vs.List
for i, v := range versions { for i, v := range versions {
if strings.HasSuffix(v, "+incompatible") { if strings.HasSuffix(v, "+incompatible") {

View file

@ -823,7 +823,7 @@ func TestCodeRepoVersions(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Versions(%q): %v", tt.prefix, err) t.Fatalf("Versions(%q): %v", tt.prefix, err)
} }
if !reflect.DeepEqual(list, tt.versions) { if !reflect.DeepEqual(list.List, tt.versions) {
t.Fatalf("Versions(%q):\nhave %v\nwant %v", tt.prefix, list, tt.versions) t.Fatalf("Versions(%q):\nhave %v\nwant %v", tt.prefix, list, tt.versions)
} }
}) })
@ -921,7 +921,13 @@ type fixedTagsRepo struct {
codehost.Repo codehost.Repo
} }
func (ch *fixedTagsRepo) Tags(string) ([]string, error) { return ch.tags, nil } func (ch *fixedTagsRepo) Tags(string) (*codehost.Tags, error) {
tags := &codehost.Tags{}
for _, t := range ch.tags {
tags.List = append(tags.List, codehost.Tag{Name: t})
}
return tags, nil
}
func TestNonCanonicalSemver(t *testing.T) { func TestNonCanonicalSemver(t *testing.T) {
root := "golang.org/x/issue24476" root := "golang.org/x/issue24476"
@ -945,7 +951,7 @@ func TestNonCanonicalSemver(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if len(v) != 1 || v[0] != "v1.0.1" { if len(v.List) != 1 || v.List[0] != "v1.0.1" {
t.Fatal("unexpected versions returned:", v) t.Fatal("unexpected versions returned:", v)
} }
} }

View file

@ -225,6 +225,12 @@ func (p *proxyRepo) ModulePath() string {
return p.path return p.path
} }
var errProxyReuse = fmt.Errorf("proxy does not support CheckReuse")
func (p *proxyRepo) CheckReuse(old *codehost.Origin) error {
return errProxyReuse
}
// versionError returns err wrapped in a ModuleError for p.path. // versionError returns err wrapped in a ModuleError for p.path.
func (p *proxyRepo) versionError(version string, err error) error { func (p *proxyRepo) versionError(version string, err error) error {
if version != "" && version != module.CanonicalVersion(version) { if version != "" && version != module.CanonicalVersion(version) {
@ -279,7 +285,7 @@ func (p *proxyRepo) getBody(path string) (r io.ReadCloser, err error) {
return resp.Body, nil return resp.Body, nil
} }
func (p *proxyRepo) Versions(prefix string) ([]string, error) { func (p *proxyRepo) Versions(prefix string) (*Versions, error) {
data, err := p.getBytes("@v/list") data, err := p.getBytes("@v/list")
if err != nil { if err != nil {
p.listLatestOnce.Do(func() { p.listLatestOnce.Do(func() {
@ -299,7 +305,7 @@ func (p *proxyRepo) Versions(prefix string) ([]string, error) {
p.listLatest, p.listLatestErr = p.latestFromList(allLine) p.listLatest, p.listLatestErr = p.latestFromList(allLine)
}) })
semver.Sort(list) semver.Sort(list)
return list, nil return &Versions{List: list}, nil
} }
func (p *proxyRepo) latest() (*RevInfo, error) { func (p *proxyRepo) latest() (*RevInfo, error) {
@ -317,9 +323,8 @@ func (p *proxyRepo) latest() (*RevInfo, error) {
func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) { func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
var ( var (
bestTime time.Time bestTime time.Time
bestTimeIsFromPseudo bool bestVersion string
bestVersion string
) )
for _, line := range allLine { for _, line := range allLine {
f := strings.Fields(line) f := strings.Fields(line)
@ -327,14 +332,12 @@ func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
// If the proxy includes timestamps, prefer the timestamp it reports. // If the proxy includes timestamps, prefer the timestamp it reports.
// Otherwise, derive the timestamp from the pseudo-version. // Otherwise, derive the timestamp from the pseudo-version.
var ( var (
ft time.Time ft time.Time
ftIsFromPseudo = false
) )
if len(f) >= 2 { if len(f) >= 2 {
ft, _ = time.Parse(time.RFC3339, f[1]) ft, _ = time.Parse(time.RFC3339, f[1])
} else if module.IsPseudoVersion(f[0]) { } else if module.IsPseudoVersion(f[0]) {
ft, _ = module.PseudoVersionTime(f[0]) ft, _ = module.PseudoVersionTime(f[0])
ftIsFromPseudo = true
} else { } else {
// Repo.Latest promises that this method is only called where there are // Repo.Latest promises that this method is only called where there are
// no tagged versions. Ignore any tagged versions that were added in the // no tagged versions. Ignore any tagged versions that were added in the
@ -343,7 +346,6 @@ func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
} }
if bestTime.Before(ft) { if bestTime.Before(ft) {
bestTime = ft bestTime = ft
bestTimeIsFromPseudo = ftIsFromPseudo
bestVersion = f[0] bestVersion = f[0]
} }
} }
@ -352,22 +354,8 @@ func (p *proxyRepo) latestFromList(allLine []string) (*RevInfo, error) {
return nil, p.versionError("", codehost.ErrNoCommits) return nil, p.versionError("", codehost.ErrNoCommits)
} }
if bestTimeIsFromPseudo { // Call Stat to get all the other fields, including Origin information.
// We parsed bestTime from the pseudo-version, but that's in UTC and we're return p.Stat(bestVersion)
// supposed to report the timestamp as reported by the VCS.
// Stat the selected version to canonicalize the timestamp.
//
// TODO(bcmills): Should we also stat other versions to ensure that we
// report the correct Name and Short for the revision?
return p.Stat(bestVersion)
}
return &RevInfo{
Version: bestVersion,
Name: bestVersion,
Short: bestVersion,
Time: bestTime,
}, nil
} }
func (p *proxyRepo) Stat(rev string) (*RevInfo, error) { func (p *proxyRepo) Stat(rev string) (*RevInfo, error) {

View file

@ -29,6 +29,12 @@ type Repo interface {
// ModulePath returns the module path. // ModulePath returns the module path.
ModulePath() string ModulePath() string
// CheckReuse checks whether the validation criteria in the origin
// are still satisfied on the server corresponding to this module.
// If so, the caller can reuse any cached Versions or RevInfo containing
// this origin rather than redownloading those from the server.
CheckReuse(old *codehost.Origin) error
// Versions lists all known versions with the given prefix. // Versions lists all known versions with the given prefix.
// Pseudo-versions are not included. // Pseudo-versions are not included.
// //
@ -42,7 +48,7 @@ type Repo interface {
// //
// If the underlying repository does not exist, // If the underlying repository does not exist,
// Versions returns an error matching errors.Is(_, os.NotExist). // Versions returns an error matching errors.Is(_, os.NotExist).
Versions(prefix string) ([]string, error) Versions(prefix string) (*Versions, error)
// Stat returns information about the revision rev. // Stat returns information about the revision rev.
// A revision can be any identifier known to the underlying service: // A revision can be any identifier known to the underlying service:
@ -61,7 +67,14 @@ type Repo interface {
Zip(dst io.Writer, version string) error Zip(dst io.Writer, version string) error
} }
// A Rev describes a single revision in a module repository. // A Versions describes the available versions in a module repository.
type Versions struct {
Origin *codehost.Origin `json:",omitempty"` // origin information for reuse
List []string // semver versions
}
// A RevInfo describes a single revision in a module repository.
type RevInfo struct { type RevInfo struct {
Version string // suggested version string for this revision Version string // suggested version string for this revision
Time time.Time // commit time Time time.Time // commit time
@ -70,6 +83,8 @@ type RevInfo struct {
// but they are not recorded when talking about module versions. // but they are not recorded when talking about module versions.
Name string `json:"-"` // complete ID in underlying repository Name string `json:"-"` // complete ID in underlying repository
Short string `json:"-"` // shortened ID, for use in pseudo-version Short string `json:"-"` // shortened ID, for use in pseudo-version
Origin *codehost.Origin `json:",omitempty"` // provenance for reuse
} }
// Re: module paths, import paths, repository roots, and lookups // Re: module paths, import paths, repository roots, and lookups
@ -320,7 +335,14 @@ func (l *loggingRepo) ModulePath() string {
return l.r.ModulePath() return l.r.ModulePath()
} }
func (l *loggingRepo) Versions(prefix string) (tags []string, err error) { func (l *loggingRepo) CheckReuse(old *codehost.Origin) (err error) {
defer func() {
logCall("CheckReuse[%s]: %v", l.r.ModulePath(), err)
}()
return l.r.CheckReuse(old)
}
func (l *loggingRepo) Versions(prefix string) (*Versions, error) {
defer logCall("Repo[%s]: Versions(%q)", l.r.ModulePath(), prefix)() defer logCall("Repo[%s]: Versions(%q)", l.r.ModulePath(), prefix)()
return l.r.Versions(prefix) return l.r.Versions(prefix)
} }
@ -360,11 +382,12 @@ type errRepo struct {
func (r errRepo) ModulePath() string { return r.modulePath } func (r errRepo) ModulePath() string { return r.modulePath }
func (r errRepo) Versions(prefix string) (tags []string, err error) { return nil, r.err } func (r errRepo) CheckReuse(old *codehost.Origin) error { return r.err }
func (r errRepo) Stat(rev string) (*RevInfo, error) { return nil, r.err } func (r errRepo) Versions(prefix string) (*Versions, error) { return nil, r.err }
func (r errRepo) Latest() (*RevInfo, error) { return nil, r.err } func (r errRepo) Stat(rev string) (*RevInfo, error) { return nil, r.err }
func (r errRepo) GoMod(version string) ([]byte, error) { return nil, r.err } func (r errRepo) Latest() (*RevInfo, error) { return nil, r.err }
func (r errRepo) Zip(dst io.Writer, version string) error { return r.err } func (r errRepo) GoMod(version string) ([]byte, error) { return nil, r.err }
func (r errRepo) Zip(dst io.Writer, version string) error { return r.err }
// A notExistError is like fs.ErrNotExist, but with a custom message // A notExistError is like fs.ErrNotExist, but with a custom message
type notExistError struct { type notExistError struct {

View file

@ -0,0 +1,87 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"encoding/hex"
"encoding/json"
"go/build"
"internal/diff"
"path/filepath"
"reflect"
"runtime"
"testing"
)
func init() {
isTest = true
enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken
}
func TestIndex(t *testing.T) {
src := filepath.Join(runtime.GOROOT(), "src")
checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) {
p := m.Package(pkg)
bp, err := p.Import(build.Default, build.ImportComment)
if err != nil {
t.Fatal(err)
}
bp1, err := build.Default.Import(pkg, filepath.Join(src, pkg), build.ImportComment)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(bp, bp1) {
t.Errorf("mismatch")
t.Logf("index:\n%s", hex.Dump(data))
js, err := json.MarshalIndent(bp, "", "\t")
if err != nil {
t.Fatal(err)
}
js1, err := json.MarshalIndent(bp1, "", "\t")
if err != nil {
t.Fatal(err)
}
t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1))
t.FailNow()
}
}
// Check packages in increasing complexity, one at a time.
pkgs := []string{
"crypto",
"encoding",
"unsafe",
"encoding/json",
"runtime",
"net",
}
var raws []*rawPackage
for _, pkg := range pkgs {
raw := importRaw(src, pkg)
raws = append(raws, raw)
t.Run(pkg, func(t *testing.T) {
data := encodeModuleBytes([]*rawPackage{raw})
m, err := fromBytes(src, data)
if err != nil {
t.Fatal(err)
}
checkPkg(t, m, pkg, data)
})
}
// Check that a multi-package index works too.
t.Run("all", func(t *testing.T) {
data := encodeModuleBytes(raws)
m, err := fromBytes(src, data)
if err != nil {
t.Fatal(err)
}
for _, pkg := range pkgs {
checkPkg(t, m, pkg, data)
}
})
}

View file

@ -15,7 +15,6 @@ import (
"internal/godebug" "internal/godebug"
"internal/goroot" "internal/goroot"
"internal/unsafeheader" "internal/unsafeheader"
"math"
"path" "path"
"path/filepath" "path/filepath"
"runtime" "runtime"
@ -45,10 +44,9 @@ var enabled bool = godebug.Get("goindex") != "0"
// do the equivalent of build.Import of packages in the module and answer other // do the equivalent of build.Import of packages in the module and answer other
// questions based on the index file's data. // questions based on the index file's data.
type Module struct { type Module struct {
modroot string modroot string
od offsetDecoder d *decoder
packages map[string]int // offsets of each package n int // number of packages
packagePaths []string // paths to package directories relative to modroot; these are the keys of packages
} }
// moduleHash returns an ActionID corresponding to the state of the module // moduleHash returns an ActionID corresponding to the state of the module
@ -141,6 +139,9 @@ func GetPackage(modroot, pkgdir string) (*IndexPackage, error) {
if !errors.Is(err, errNotFromModuleCache) { if !errors.Is(err, errNotFromModuleCache) {
return nil, err return nil, err
} }
if cfg.BuildContext.Compiler == "gccgo" && str.HasPathPrefix(modroot, cfg.GOROOTsrc) {
return nil, err // gccgo has no sources for GOROOT packages.
}
return openIndexPackage(modroot, pkgdir) return openIndexPackage(modroot, pkgdir)
} }
@ -179,6 +180,7 @@ func openIndexModule(modroot string, ismodcache bool) (*Module, error) {
err error err error
} }
r := mcache.Do(modroot, func() any { r := mcache.Do(modroot, func() any {
fsys.Trace("openIndexModule", modroot)
id, err := moduleHash(modroot, ismodcache) id, err := moduleHash(modroot, ismodcache)
if err != nil { if err != nil {
return result{nil, err} return result{nil, err}
@ -212,6 +214,7 @@ func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
err error err error
} }
r := pcache.Do([2]string{modroot, pkgdir}, func() any { r := pcache.Do([2]string{modroot, pkgdir}, func() any {
fsys.Trace("openIndexPackage", pkgdir)
id, err := dirHash(modroot, pkgdir) id, err := dirHash(modroot, pkgdir)
if err != nil { if err != nil {
return result{nil, err} return result{nil, err}
@ -234,110 +237,131 @@ func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
return r.pkg, r.err return r.pkg, r.err
} }
var errCorrupt = errors.New("corrupt index")
// protect marks the start of a large section of code that accesses the index.
// It should be used as:
//
// defer unprotect(protect, &err)
//
// It should not be used for trivial accesses which would be
// dwarfed by the overhead of the defer.
func protect() bool {
return debug.SetPanicOnFault(true)
}
var isTest = false
// unprotect marks the end of a large section of code that accesses the index.
// It should be used as:
//
// defer unprotect(protect, &err)
//
// end looks for panics due to errCorrupt or bad mmap accesses.
// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
// If errp is nil, end adds the explanatory text but then calls base.Fatalf.
func unprotect(old bool, errp *error) {
// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
// that all its errors satisfy this interface, we'll only check for these errors so that
// we don't suppress panics that could have been produced from other sources.
type addrer interface {
Addr() uintptr
}
debug.SetPanicOnFault(old)
if e := recover(); e != nil {
if _, ok := e.(addrer); ok || e == errCorrupt {
// This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
err := fmt.Errorf("error reading module index: %v", e)
if errp != nil {
*errp = err
return
}
if isTest {
panic(err)
}
base.Fatalf("%v", err)
}
// The panic was likely not caused by SetPanicOnFault.
panic(e)
}
}
// fromBytes returns a *Module given the encoded representation. // fromBytes returns a *Module given the encoded representation.
func fromBytes(moddir string, data []byte) (mi *Module, err error) { func fromBytes(moddir string, data []byte) (m *Module, err error) {
if !enabled { if !enabled {
panic("use of index") panic("use of index")
} }
// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed defer unprotect(protect(), &err)
// that all its errors satisfy this interface, we'll only check for these errors so that
// we don't suppress panics that could have been produced from other sources. if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
type addrer interface { return nil, errCorrupt
Addr() uintptr
} }
// set PanicOnFault to true so that we can catch errors on the initial reads of the slice, const hdr = len(indexVersion + "\n")
// in case it's mmapped (the common case). d := &decoder{data: data}
old := debug.SetPanicOnFault(true) str := d.intAt(hdr)
defer func() { if str < hdr+8 || len(d.data) < str {
debug.SetPanicOnFault(old) return nil, errCorrupt
if e := recover(); e != nil {
if _, ok := e.(addrer); ok {
// This panic was almost certainly caused by SetPanicOnFault.
err = fmt.Errorf("error reading module index: %v", e)
return
}
// The panic was likely not caused by SetPanicOnFault.
panic(e)
}
}()
gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
if string(gotVersion) != indexVersion {
return nil, fmt.Errorf("bad index version string: %q", gotVersion)
} }
stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:] d.data, d.str = data[:str], d.data[str:]
st := newStringTable(data[stringTableOffset:]) // Check that string table looks valid.
d := decoder{unread, st} // First string is empty string (length 0),
numPackages := d.int() // and we leave a marker byte 0xFF at the end
// just to make sure that the file is not truncated.
packagePaths := make([]string, numPackages) if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
for i := range packagePaths { return nil, errCorrupt
packagePaths[i] = d.string()
}
packageOffsets := make([]int, numPackages)
for i := range packageOffsets {
packageOffsets[i] = d.int()
}
packages := make(map[string]int, numPackages)
for i := range packagePaths {
packages[packagePaths[i]] = packageOffsets[i]
} }
return &Module{ n := d.intAt(hdr + 4)
if n < 0 || n > (len(d.data)-8)/8 {
return nil, errCorrupt
}
m = &Module{
moddir, moddir,
offsetDecoder{data, st}, d,
packages, n,
packagePaths, }
}, nil return m, nil
} }
// packageFromBytes returns a *IndexPackage given the encoded representation. // packageFromBytes returns a *IndexPackage given the encoded representation.
func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) { func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
if !enabled { m, err := fromBytes(modroot, data)
panic("use of package index when not enabled") if err != nil {
return nil, err
} }
if m.n != 1 {
// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed return nil, fmt.Errorf("corrupt single-package index")
// that all its errors satisfy this interface, we'll only check for these errors so that
// we don't suppress panics that could have been produced from other sources.
type addrer interface {
Addr() uintptr
} }
return m.pkg(0), nil
// set PanicOnFault to true so that we can catch errors on the initial reads of the slice,
// in case it's mmapped (the common case).
old := debug.SetPanicOnFault(true)
defer func() {
debug.SetPanicOnFault(old)
if e := recover(); e != nil {
if _, ok := e.(addrer); ok {
// This panic was almost certainly caused by SetPanicOnFault.
err = fmt.Errorf("error reading module index: %v", e)
return
}
// The panic was likely not caused by SetPanicOnFault.
panic(e)
}
}()
gotVersion, unread, _ := bytes.Cut(data, []byte{'\n'})
if string(gotVersion) != indexVersion {
return nil, fmt.Errorf("bad index version string: %q", gotVersion)
}
stringTableOffset, unread := binary.LittleEndian.Uint32(unread[:4]), unread[4:]
st := newStringTable(data[stringTableOffset:])
d := &decoder{unread, st}
p = decodePackage(d, offsetDecoder{data, st})
p.modroot = modroot
return p, nil
} }
// Returns a list of directory paths, relative to the modroot, for // pkgDir returns the dir string of the i'th package in the index.
// packages contained in the module index. func (m *Module) pkgDir(i int) string {
func (mi *Module) Packages() []string { if i < 0 || i >= m.n {
return mi.packagePaths panic(errCorrupt)
}
return m.d.stringAt(12 + 8 + 8*i)
}
// pkgOff returns the offset of the data for the i'th package in the index.
func (m *Module) pkgOff(i int) int {
if i < 0 || i >= m.n {
panic(errCorrupt)
}
return m.d.intAt(12 + 8 + 8*i + 4)
}
// Walk calls f for each package in the index, passing the path to that package relative to the module root.
func (m *Module) Walk(f func(path string)) {
defer unprotect(protect(), nil)
for i := 0; i < m.n; i++ {
f(m.pkgDir(i))
}
} }
// relPath returns the path relative to the module's root. // relPath returns the path relative to the module's root.
@ -347,11 +371,7 @@ func relPath(path, modroot string) string {
// Import is the equivalent of build.Import given the information in Module. // Import is the equivalent of build.Import given the information in Module.
func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) { func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
defer func() { defer unprotect(protect(), &err)
if e := recover(); e != nil {
err = fmt.Errorf("error reading module index: %v", e)
}
}()
ctxt := (*Context)(&bctxt) ctxt := (*Context)(&bctxt)
@ -792,46 +812,44 @@ type IndexPackage struct {
var errCannotFindPackage = errors.New("cannot find package") var errCannotFindPackage = errors.New("cannot find package")
// Package returns an IndexPackage constructed using the information in the Module. // Package and returns finds the package with the given path (relative to the module root).
func (mi *Module) Package(path string) *IndexPackage { // If the package does not exist, Package returns an IndexPackage that will return an
defer func() { // appropriate error from its methods.
if e := recover(); e != nil { func (m *Module) Package(path string) *IndexPackage {
base.Fatalf("error reading module index: %v", e) defer unprotect(protect(), nil)
}
}()
offset, ok := mi.packages[path]
if !ok {
return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
}
// TODO(matloob): do we want to lock on the module index? i, ok := sort.Find(m.n, func(i int) int {
d := mi.od.decoderAt(offset) return strings.Compare(path, m.pkgDir(i))
p := decodePackage(d, mi.od) })
p.modroot = mi.modroot if !ok {
return p return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
}
return m.pkg(i)
} }
func decodePackage(d *decoder, od offsetDecoder) *IndexPackage { // pkgAt returns the i'th IndexPackage in m.
rp := new(IndexPackage) func (m *Module) pkg(i int) *IndexPackage {
if errstr := d.string(); errstr != "" { r := m.d.readAt(m.pkgOff(i))
rp.error = errors.New(errstr) p := new(IndexPackage)
if errstr := r.string(); errstr != "" {
p.error = errors.New(errstr)
} }
rp.dir = d.string() p.dir = r.string()
numSourceFiles := d.uint32() p.sourceFiles = make([]*sourceFile, r.int())
rp.sourceFiles = make([]*sourceFile, numSourceFiles) for i := range p.sourceFiles {
for i := uint32(0); i < numSourceFiles; i++ { p.sourceFiles[i] = &sourceFile{
offset := d.uint32() d: m.d,
rp.sourceFiles[i] = &sourceFile{ pos: r.int(),
od: od.offsetDecoderAt(offset),
} }
} }
return rp p.modroot = m.modroot
return p
} }
// sourceFile represents the information of a given source file in the module index. // sourceFile represents the information of a given source file in the module index.
type sourceFile struct { type sourceFile struct {
od offsetDecoder // od interprets all offsets relative to the start of the source file's data d *decoder // encoding of this source file
pos int // start of sourceFile encoding in d
onceReadImports sync.Once onceReadImports sync.Once
savedImports []rawImport // saved imports so that they're only read once savedImports []rawImport // saved imports so that they're only read once
} }
@ -851,73 +869,67 @@ const (
) )
func (sf *sourceFile) error() string { func (sf *sourceFile) error() string {
return sf.od.stringAt(sourceFileError) return sf.d.stringAt(sf.pos + sourceFileError)
} }
func (sf *sourceFile) parseError() string { func (sf *sourceFile) parseError() string {
return sf.od.stringAt(sourceFileParseError) return sf.d.stringAt(sf.pos + sourceFileParseError)
} }
func (sf *sourceFile) synopsis() string { func (sf *sourceFile) synopsis() string {
return sf.od.stringAt(sourceFileSynopsis) return sf.d.stringAt(sf.pos + sourceFileSynopsis)
} }
func (sf *sourceFile) name() string { func (sf *sourceFile) name() string {
return sf.od.stringAt(sourceFileName) return sf.d.stringAt(sf.pos + sourceFileName)
} }
func (sf *sourceFile) pkgName() string { func (sf *sourceFile) pkgName() string {
return sf.od.stringAt(sourceFilePkgName) return sf.d.stringAt(sf.pos + sourceFilePkgName)
} }
func (sf *sourceFile) ignoreFile() bool { func (sf *sourceFile) ignoreFile() bool {
return sf.od.boolAt(sourceFileIgnoreFile) return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
} }
func (sf *sourceFile) binaryOnly() bool { func (sf *sourceFile) binaryOnly() bool {
return sf.od.boolAt(sourceFileBinaryOnly) return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
} }
func (sf *sourceFile) cgoDirectives() string { func (sf *sourceFile) cgoDirectives() string {
return sf.od.stringAt(sourceFileCgoDirectives) return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
} }
func (sf *sourceFile) goBuildConstraint() string { func (sf *sourceFile) goBuildConstraint() string {
return sf.od.stringAt(sourceFileGoBuildConstraint) return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
} }
func (sf *sourceFile) plusBuildConstraints() []string { func (sf *sourceFile) plusBuildConstraints() []string {
d := sf.od.decoderAt(sourceFileNumPlusBuildConstraints) pos := sf.pos + sourceFileNumPlusBuildConstraints
n := d.int() n := sf.d.intAt(pos)
pos += 4
ret := make([]string, n) ret := make([]string, n)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
ret[i] = d.string() ret[i] = sf.d.stringAt(pos)
pos += 4
} }
return ret return ret
} }
func importsOffset(numPlusBuildConstraints int) int {
// 4 bytes per uin32, add one to advance past numPlusBuildConstraints itself
return sourceFileNumPlusBuildConstraints + 4*(numPlusBuildConstraints+1)
}
func (sf *sourceFile) importsOffset() int { func (sf *sourceFile) importsOffset() int {
numPlusBuildConstraints := sf.od.intAt(sourceFileNumPlusBuildConstraints) pos := sf.pos + sourceFileNumPlusBuildConstraints
return importsOffset(numPlusBuildConstraints) n := sf.d.intAt(pos)
} // each build constraint is 1 uint32
return pos + 4 + n*4
func embedsOffset(importsOffset, numImports int) int {
// 4 bytes per uint32; 1 to advance past numImports itself, and 5 uint32s per import
return importsOffset + 4*(1+(5*numImports))
} }
func (sf *sourceFile) embedsOffset() int { func (sf *sourceFile) embedsOffset() int {
importsOffset := sf.importsOffset() pos := sf.importsOffset()
numImports := sf.od.intAt(importsOffset) n := sf.d.intAt(pos)
return embedsOffset(importsOffset, numImports) // each import is 5 uint32s (string + tokpos)
return pos + 4 + n*(4*5)
} }
func (sf *sourceFile) imports() []rawImport { func (sf *sourceFile) imports() []rawImport {
sf.onceReadImports.Do(func() { sf.onceReadImports.Do(func() {
importsOffset := sf.importsOffset() importsOffset := sf.importsOffset()
d := sf.od.decoderAt(importsOffset) r := sf.d.readAt(importsOffset)
numImports := d.int() numImports := r.int()
ret := make([]rawImport, numImports) ret := make([]rawImport, numImports)
for i := 0; i < numImports; i++ { for i := 0; i < numImports; i++ {
ret[i].path = d.string() ret[i] = rawImport{r.string(), r.tokpos()}
ret[i].position = d.tokpos()
} }
sf.savedImports = ret sf.savedImports = ret
}) })
@ -926,125 +938,15 @@ func (sf *sourceFile) imports() []rawImport {
func (sf *sourceFile) embeds() []embed { func (sf *sourceFile) embeds() []embed {
embedsOffset := sf.embedsOffset() embedsOffset := sf.embedsOffset()
d := sf.od.decoderAt(embedsOffset) r := sf.d.readAt(embedsOffset)
numEmbeds := d.int() numEmbeds := r.int()
ret := make([]embed, numEmbeds) ret := make([]embed, numEmbeds)
for i := range ret { for i := range ret {
pattern := d.string() ret[i] = embed{r.string(), r.tokpos()}
pos := d.tokpos()
ret[i] = embed{pattern, pos}
} }
return ret return ret
} }
// A decoder reads from the current position of the file and advances its position as it
// reads.
type decoder struct {
b []byte
st *stringTable
}
func (d *decoder) uint32() uint32 {
n := binary.LittleEndian.Uint32(d.b[:4])
d.b = d.b[4:]
return n
}
func (d *decoder) int() int {
n := d.uint32()
if int64(n) > math.MaxInt {
base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
}
return int(n)
}
func (d *decoder) tokpos() token.Position {
file := d.string()
offset := d.int()
line := d.int()
column := d.int()
return token.Position{
Filename: file,
Offset: offset,
Line: line,
Column: column,
}
}
func (d *decoder) string() string {
return d.st.string(d.int())
}
// And offset decoder reads information offset from its position in the file.
// It's either offset from the beginning of the index, or the beginning of a sourceFile's data.
type offsetDecoder struct {
b []byte
st *stringTable
}
func (od *offsetDecoder) uint32At(offset int) uint32 {
if offset > len(od.b) {
base.Fatalf("go: trying to read from index file at offset higher than file length. This indicates a corrupt offset file in the cache.")
}
return binary.LittleEndian.Uint32(od.b[offset:])
}
func (od *offsetDecoder) intAt(offset int) int {
n := od.uint32At(offset)
if int64(n) > math.MaxInt {
base.Fatalf("go: attempting to read a uint32 from the index that overflows int")
}
return int(n)
}
func (od *offsetDecoder) boolAt(offset int) bool {
switch v := od.uint32At(offset); v {
case 0:
return false
case 1:
return true
default:
base.Fatalf("go: invalid bool value in index file encoding: %v", v)
}
panic("unreachable")
}
func (od *offsetDecoder) stringAt(offset int) string {
return od.st.string(od.intAt(offset))
}
func (od *offsetDecoder) decoderAt(offset int) *decoder {
return &decoder{od.b[offset:], od.st}
}
func (od *offsetDecoder) offsetDecoderAt(offset uint32) offsetDecoder {
return offsetDecoder{od.b[offset:], od.st}
}
type stringTable struct {
b []byte
}
func newStringTable(b []byte) *stringTable {
return &stringTable{b: b}
}
func (st *stringTable) string(pos int) string {
if pos == 0 {
return ""
}
bb := st.b[pos:]
i := bytes.IndexByte(bb, 0)
if i == -1 {
panic("reached end of string table trying to read string")
}
s := asString(bb[:i])
return s
}
func asString(b []byte) string { func asString(b []byte) string {
p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data p := (*unsafeheader.Slice)(unsafe.Pointer(&b)).Data
@ -1055,3 +957,82 @@ func asString(b []byte) string {
return s return s
} }
// A decoder helps decode the index format.
type decoder struct {
data []byte // data after header
str []byte // string table
}
// intAt returns the int at the given offset in d.data.
func (d *decoder) intAt(off int) int {
if off < 0 || len(d.data)-off < 4 {
panic(errCorrupt)
}
i := binary.LittleEndian.Uint32(d.data[off : off+4])
if int32(i)>>31 != 0 {
panic(errCorrupt)
}
return int(i)
}
// boolAt returns the bool at the given offset in d.data.
func (d *decoder) boolAt(off int) bool {
return d.intAt(off) != 0
}
// stringTableAt returns the string pointed at by the int at the given offset in d.data.
func (d *decoder) stringAt(off int) string {
return d.stringTableAt(d.intAt(off))
}
// stringTableAt returns the string at the given offset in the string table d.str.
func (d *decoder) stringTableAt(off int) string {
if off < 0 || off >= len(d.str) {
panic(errCorrupt)
}
s := d.str[off:]
v, n := binary.Uvarint(s)
if n <= 0 || v > uint64(len(s[n:])) {
panic(errCorrupt)
}
return asString(s[n : n+int(v)])
}
// A reader reads sequential fields from a section of the index format.
type reader struct {
d *decoder
pos int
}
// readAt returns a reader starting at the given position in d.
func (d *decoder) readAt(pos int) *reader {
return &reader{d, pos}
}
// int reads the next int.
func (r *reader) int() int {
i := r.d.intAt(r.pos)
r.pos += 4
return i
}
// string reads the next string.
func (r *reader) string() string {
return r.d.stringTableAt(r.int())
}
// bool reads the next bool.
func (r *reader) bool() bool {
return r.int() != 0
}
// tokpos reads the next token.Position.
func (r *reader) tokpos() token.Position {
return token.Position{
Filename: r.string(),
Offset: r.int(),
Line: r.int(),
Column: r.int(),
}
}

View file

@ -46,6 +46,7 @@ func moduleWalkErr(modroot string, path string, info fs.FileInfo, err error) err
// encoded representation. It returns ErrNotIndexed if the module can't // encoded representation. It returns ErrNotIndexed if the module can't
// be indexed because it contains symlinks. // be indexed because it contains symlinks.
func indexModule(modroot string) ([]byte, error) { func indexModule(modroot string) ([]byte, error) {
fsys.Trace("indexModule", modroot)
var packages []*rawPackage var packages []*rawPackage
err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error { err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
if err := moduleWalkErr(modroot, path, info, err); err != nil { if err := moduleWalkErr(modroot, path, info, err); err != nil {
@ -72,6 +73,7 @@ func indexModule(modroot string) ([]byte, error) {
// encoded representation. It returns ErrNotIndexed if the package can't // encoded representation. It returns ErrNotIndexed if the package can't
// be indexed. // be indexed.
func indexPackage(modroot, pkgdir string) []byte { func indexPackage(modroot, pkgdir string) []byte {
fsys.Trace("indexPackage", pkgdir)
p := importRaw(modroot, relPath(pkgdir, modroot)) p := importRaw(modroot, relPath(pkgdir, modroot))
return encodePackageBytes(p) return encodePackageBytes(p)
} }

View file

@ -1,54 +1,46 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex package modindex
import ( import (
"cmd/go/internal/base" "cmd/go/internal/base"
"encoding/binary" "encoding/binary"
"go/token" "go/token"
"math"
"sort" "sort"
"strings"
) )
const indexVersion = "go index v0" const indexVersion = "go index v1" // 11 bytes (plus \n), to align uint32s in index
// encodeModuleBytes produces the encoded representation of the module index. // encodeModuleBytes produces the encoded representation of the module index.
// encodeModuleBytes may modify the packages slice. // encodeModuleBytes may modify the packages slice.
func encodeModuleBytes(packages []*rawPackage) []byte { func encodeModuleBytes(packages []*rawPackage) []byte {
e := newEncoder() e := newEncoder()
e.Bytes([]byte(indexVersion)) e.Bytes([]byte(indexVersion + "\n"))
e.Bytes([]byte{'\n'})
stringTableOffsetPos := e.Pos() // fill this at the end stringTableOffsetPos := e.Pos() // fill this at the end
e.Uint32(0) // string table offset e.Uint32(0) // string table offset
e.Int(len(packages))
sort.Slice(packages, func(i, j int) bool { sort.Slice(packages, func(i, j int) bool {
return packages[i].dir < packages[j].dir return packages[i].dir < packages[j].dir
}) })
e.Int(len(packages))
packagesPos := e.Pos()
for _, p := range packages { for _, p := range packages {
e.String(p.dir) e.String(p.dir)
}
packagesOffsetPos := e.Pos()
for range packages {
e.Int(0) e.Int(0)
} }
for i, p := range packages { for i, p := range packages {
e.IntAt(e.Pos(), packagesOffsetPos+4*i) e.IntAt(e.Pos(), packagesPos+8*i+4)
encodePackage(e, p) encodePackage(e, p)
} }
e.IntAt(e.Pos(), stringTableOffsetPos) e.IntAt(e.Pos(), stringTableOffsetPos)
e.Bytes(e.stringTable) e.Bytes(e.stringTable)
e.Bytes([]byte{0xFF}) // end of string table marker
return e.b return e.b
} }
func encodePackageBytes(p *rawPackage) []byte { func encodePackageBytes(p *rawPackage) []byte {
e := newEncoder() return encodeModuleBytes([]*rawPackage{p})
e.Bytes([]byte(indexVersion))
e.Bytes([]byte{'\n'})
stringTableOffsetPos := e.Pos() // fill this at the end
e.Uint32(0) // string table offset
encodePackage(e, p)
e.IntAt(e.Pos(), stringTableOffsetPos)
e.Bytes(e.stringTable)
return e.b
} }
func encodePackage(e *encoder, p *rawPackage) { func encodePackage(e *encoder, p *rawPackage) {
@ -126,9 +118,6 @@ func (e *encoder) Bytes(b []byte) {
} }
func (e *encoder) String(s string) { func (e *encoder) String(s string) {
if strings.IndexByte(s, 0) >= 0 {
base.Fatalf("go: attempting to encode a string containing a null byte")
}
if n, ok := e.strings[s]; ok { if n, ok := e.strings[s]; ok {
e.Int(n) e.Int(n)
return return
@ -136,8 +125,8 @@ func (e *encoder) String(s string) {
pos := len(e.stringTable) pos := len(e.stringTable)
e.strings[s] = pos e.strings[s] = pos
e.Int(pos) e.Int(pos)
e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s)))
e.stringTable = append(e.stringTable, []byte(s)...) e.stringTable = append(e.stringTable, []byte(s)...)
e.stringTable = append(e.stringTable, 0)
} }
func (e *encoder) Bool(b bool) { func (e *encoder) Bool(b bool) {
@ -152,17 +141,18 @@ func (e *encoder) Uint32(n uint32) {
e.b = binary.LittleEndian.AppendUint32(e.b, n) e.b = binary.LittleEndian.AppendUint32(e.b, n)
} }
// Int encodes n. Note that all ints are written to the index as uint32s. // Int encodes n. Note that all ints are written to the index as uint32s,
// and to avoid problems on 32-bit systems we require fitting into a 32-bit int.
func (e *encoder) Int(n int) { func (e *encoder) Int(n int) {
if n < 0 || int64(n) > math.MaxUint32 { if n < 0 || int(int32(n)) != n {
base.Fatalf("go: attempting to write an int to the index that overflows uint32") base.Fatalf("go: attempting to write an int to the index that overflows int32")
} }
e.Uint32(uint32(n)) e.Uint32(uint32(n))
} }
func (e *encoder) IntAt(n int, at int) { func (e *encoder) IntAt(n int, at int) {
if n < 0 || int64(n) > math.MaxUint32 { if n < 0 || int(int32(n)) != n {
base.Fatalf("go: attempting to write an int to the index that overflows uint32") base.Fatalf("go: attempting to write an int to the index that overflows int32")
} }
binary.LittleEndian.PutUint32(e.b[at:], uint32(n)) binary.LittleEndian.PutUint32(e.b[at:], uint32(n))
} }

View file

@ -4,7 +4,11 @@
package modinfo package modinfo
import "time" import (
"cmd/go/internal/modfetch/codehost"
"encoding/json"
"time"
)
// Note that these structs are publicly visible (part of go list's API) // Note that these structs are publicly visible (part of go list's API)
// and the fields are documented in the help text in ../list/list.go // and the fields are documented in the help text in ../list/list.go
@ -12,6 +16,7 @@ import "time"
type ModulePublic struct { type ModulePublic struct {
Path string `json:",omitempty"` // module path Path string `json:",omitempty"` // module path
Version string `json:",omitempty"` // module version Version string `json:",omitempty"` // module version
Query string `json:",omitempty"` // version query corresponding to this version
Versions []string `json:",omitempty"` // available module versions Versions []string `json:",omitempty"` // available module versions
Replace *ModulePublic `json:",omitempty"` // replaced by this module Replace *ModulePublic `json:",omitempty"` // replaced by this module
Time *time.Time `json:",omitempty"` // time version was created Time *time.Time `json:",omitempty"` // time version was created
@ -24,12 +29,27 @@ type ModulePublic struct {
Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u)
Deprecated string `json:",omitempty"` // deprecation message, if any (with -u) Deprecated string `json:",omitempty"` // deprecation message, if any (with -u)
Error *ModuleError `json:",omitempty"` // error loading module Error *ModuleError `json:",omitempty"` // error loading module
Origin *codehost.Origin `json:",omitempty"` // provenance of module
Reuse bool `json:",omitempty"` // reuse of old module info is safe
} }
type ModuleError struct { type ModuleError struct {
Err string // error text Err string // error text
} }
type moduleErrorNoMethods ModuleError
// UnmarshalJSON accepts both {"Err":"text"} and "text",
// so that the output of go mod download -json can still
// be unmarshalled into a ModulePublic during -reuse processing.
func (e *ModuleError) UnmarshalJSON(data []byte) error {
if len(data) > 0 && data[0] == '"' {
return json.Unmarshal(data, &e.Err)
}
return json.Unmarshal(data, (*moduleErrorNoMethods)(e))
}
func (m *ModulePublic) String() string { func (m *ModulePublic) String() string {
s := m.Path s := m.Path
versionString := func(mm *ModulePublic) string { versionString := func(mm *ModulePublic) string {

View file

@ -17,6 +17,7 @@ import (
"cmd/go/internal/base" "cmd/go/internal/base"
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/modfetch" "cmd/go/internal/modfetch"
"cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modindex" "cmd/go/internal/modindex"
"cmd/go/internal/modinfo" "cmd/go/internal/modinfo"
"cmd/go/internal/search" "cmd/go/internal/search"
@ -60,7 +61,7 @@ func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePubli
} }
rs := LoadModFile(ctx) rs := LoadModFile(ctx)
return moduleInfo(ctx, rs, m, 0) return moduleInfo(ctx, rs, m, 0, nil)
} }
// PackageModRoot returns the module root directory for the module that provides // PackageModRoot returns the module root directory for the module that provides
@ -90,7 +91,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
if i := strings.Index(path, "@"); i >= 0 { if i := strings.Index(path, "@"); i >= 0 {
m := module.Version{Path: path[:i], Version: path[i+1:]} m := module.Version{Path: path[:i], Version: path[i+1:]}
return moduleInfo(ctx, nil, m, 0) return moduleInfo(ctx, nil, m, 0, nil)
} }
rs := LoadModFile(ctx) rs := LoadModFile(ctx)
@ -119,7 +120,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
} }
} }
return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0) return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil)
} }
// addUpdate fills in m.Update if an updated version is available. // addUpdate fills in m.Update if an updated version is available.
@ -130,10 +131,15 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) {
info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed)
var noVersionErr *NoMatchingVersionError var noVersionErr *NoMatchingVersionError
if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { if errors.Is(err, ErrDisallowed) ||
errors.Is(err, fs.ErrNotExist) ||
errors.As(err, &noVersionErr) {
// Ignore "not found" and "no matching version" errors. // Ignore "not found" and "no matching version" errors.
// This means the proxy has no matching version or no versions at all. // This means the proxy has no matching version or no versions at all.
// //
// Ignore "disallowed" errors. This means the current version is
// excluded or retracted and there are no higher allowed versions.
//
// We should report other errors though. An attacker that controls the // We should report other errors though. An attacker that controls the
// network shouldn't be able to hide versions by interfering with // network shouldn't be able to hide versions by interfering with
// the HTTPS connection. An attacker that controls the proxy may still // the HTTPS connection. An attacker that controls the proxy may still
@ -156,6 +162,45 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) {
} }
} }
// mergeOrigin merges two origins,
// returning and possibly modifying one of its arguments.
// If the two origins conflict, mergeOrigin returns a non-specific one
// that will not pass CheckReuse.
// If m1 or m2 is nil, the other is returned unmodified.
// But if m1 or m2 is non-nil and uncheckable, the result is also uncheckable,
// to preserve uncheckability.
func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin {
if m1 == nil {
return m2
}
if m2 == nil {
return m1
}
if !m1.Checkable() {
return m1
}
if !m2.Checkable() {
return m2
}
if m2.TagSum != "" {
if m1.TagSum != "" && (m1.TagSum != m2.TagSum || m1.TagPrefix != m2.TagPrefix) {
m1.ClearCheckable()
return m1
}
m1.TagSum = m2.TagSum
m1.TagPrefix = m2.TagPrefix
}
if m2.Hash != "" {
if m1.Hash != "" && (m1.Hash != m2.Hash || m1.Ref != m2.Ref) {
m1.ClearCheckable()
return m1
}
m1.Hash = m2.Hash
m1.Ref = m2.Ref
}
return m1
}
// addVersions fills in m.Versions with the list of known versions. // addVersions fills in m.Versions with the list of known versions.
// Excluded versions will be omitted. If listRetracted is false, retracted // Excluded versions will be omitted. If listRetracted is false, retracted
// versions will also be omitted. // versions will also be omitted.
@ -164,11 +209,12 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo
if listRetracted { if listRetracted {
allowed = CheckExclusions allowed = CheckExclusions
} }
var err error v, origin, err := versions(ctx, m.Path, allowed)
m.Versions, err = versions(ctx, m.Path, allowed)
if err != nil && m.Error == nil { if err != nil && m.Error == nil {
m.Error = &modinfo.ModuleError{Err: err.Error()} m.Error = &modinfo.ModuleError{Err: err.Error()}
} }
m.Versions = v
m.Origin = mergeOrigin(m.Origin, origin)
} }
// addRetraction fills in m.Retracted if the module was retracted by its author. // addRetraction fills in m.Retracted if the module was retracted by its author.
@ -230,7 +276,7 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) {
// moduleInfo returns information about module m, loaded from the requirements // moduleInfo returns information about module m, loaded from the requirements
// in rs (which may be nil to indicate that m was not loaded from a requirement // in rs (which may be nil to indicate that m was not loaded from a requirement
// graph). // graph).
func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode) *modinfo.ModulePublic { func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic {
if m.Version == "" && MainModules.Contains(m.Path) { if m.Version == "" && MainModules.Contains(m.Path) {
info := &modinfo.ModulePublic{ info := &modinfo.ModulePublic{
Path: m.Path, Path: m.Path,
@ -260,6 +306,15 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
// completeFromModCache fills in the extra fields in m using the module cache. // completeFromModCache fills in the extra fields in m using the module cache.
completeFromModCache := func(m *modinfo.ModulePublic) { completeFromModCache := func(m *modinfo.ModulePublic) {
if old := reuse[module.Version{Path: m.Path, Version: m.Version}]; old != nil {
if err := checkReuse(ctx, m.Path, old.Origin); err == nil {
*m = *old
m.Query = ""
m.Dir = ""
return
}
}
checksumOk := func(suffix string) bool { checksumOk := func(suffix string) bool {
return rs == nil || m.Version == "" || cfg.BuildMod == "mod" || return rs == nil || m.Version == "" || cfg.BuildMod == "mod" ||
modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix})

View file

@ -397,7 +397,6 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
seen := map[module.Version]bool{} seen := map[module.Version]bool{}
for _, m := range roots { for _, m := range roots {
hasDepsInAll[m.Path] = true hasDepsInAll[m.Path] = true
seen[m] = true
} }
// This loop will terminate because it will call enqueue on each version of // This loop will terminate because it will call enqueue on each version of
// each dependency of the modules in hasDepsInAll at most once (and only // each dependency of the modules in hasDepsInAll at most once (and only
@ -406,11 +405,11 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
needsEnqueueing := map[module.Version]bool{} needsEnqueueing := map[module.Version]bool{}
for p := range hasDepsInAll { for p := range hasDepsInAll {
m := module.Version{Path: p, Version: mg.g.Selected(p)} m := module.Version{Path: p, Version: mg.g.Selected(p)}
reqs, ok := mg.g.RequiredBy(m) if !seen[m] {
if !ok {
needsEnqueueing[m] = true needsEnqueueing[m] = true
continue continue
} }
reqs, _ := mg.g.RequiredBy(m)
for _, r := range reqs { for _, r := range reqs {
s := module.Version{Path: r.Path, Version: mg.g.Selected(r.Path)} s := module.Version{Path: r.Path, Version: mg.g.Selected(r.Path)}
if cmpVersion(s.Version, r.Version) > 0 && !seen[s] { if cmpVersion(s.Version, r.Version) > 0 && !seen[s] {

View file

@ -509,7 +509,7 @@ func (l *versionLimiter) UpgradeToward(ctx context.Context, m module.Version) er
} }
if l.check(m, l.pruning).isDisqualified() { if l.check(m, l.pruning).isDisqualified() {
candidates, err := versions(ctx, m.Path, CheckAllowed) candidates, _, err := versions(ctx, m.Path, CheckAllowed)
if err != nil { if err != nil {
// This is likely a transient error reaching the repository, // This is likely a transient error reaching the repository,
// rather than a permanent error with the retrieved version. // rather than a permanent error with the retrieved version.

View file

@ -5,15 +5,19 @@
package modload package modload
import ( import (
"bytes"
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"os" "os"
"runtime" "runtime"
"strings" "strings"
"cmd/go/internal/base" "cmd/go/internal/base"
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modinfo" "cmd/go/internal/modinfo"
"cmd/go/internal/search" "cmd/go/internal/search"
@ -34,13 +38,44 @@ const (
// along with any error preventing additional matches from being identified. // along with any error preventing additional matches from being identified.
// //
// The returned slice can be nonempty even if the error is non-nil. // The returned slice can be nonempty even if the error is non-nil.
func ListModules(ctx context.Context, args []string, mode ListMode) ([]*modinfo.ModulePublic, error) { func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) {
rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode) var reuse map[module.Version]*modinfo.ModulePublic
if reuseFile != "" {
data, err := os.ReadFile(reuseFile)
if err != nil {
return nil, err
}
dec := json.NewDecoder(bytes.NewReader(data))
reuse = make(map[module.Version]*modinfo.ModulePublic)
for {
var m modinfo.ModulePublic
if err := dec.Decode(&m); err != nil {
if err == io.EOF {
break
}
return nil, fmt.Errorf("parsing %s: %v", reuseFile, err)
}
if m.Origin == nil || !m.Origin.Checkable() {
// Nothing to check to validate reuse.
continue
}
m.Reuse = true
reuse[module.Version{Path: m.Path, Version: m.Version}] = &m
if m.Query != "" {
reuse[module.Version{Path: m.Path, Version: m.Query}] = &m
}
}
}
rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse)
type token struct{} type token struct{}
sem := make(chan token, runtime.GOMAXPROCS(0)) sem := make(chan token, runtime.GOMAXPROCS(0))
if mode != 0 { if mode != 0 {
for _, m := range mods { for _, m := range mods {
if m.Reuse {
continue
}
add := func(m *modinfo.ModulePublic) { add := func(m *modinfo.ModulePublic) {
sem <- token{} sem <- token{}
go func() { go func() {
@ -80,11 +115,11 @@ func ListModules(ctx context.Context, args []string, mode ListMode) ([]*modinfo.
return mods, err return mods, err
} }
func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) {
if len(args) == 0 { if len(args) == 0 {
var ms []*modinfo.ModulePublic var ms []*modinfo.ModulePublic
for _, m := range MainModules.Versions() { for _, m := range MainModules.Versions() {
ms = append(ms, moduleInfo(ctx, rs, m, mode)) ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse))
} }
return rs, ms, nil return rs, ms, nil
} }
@ -157,12 +192,17 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// specific revision or used 'go list -retracted'. // specific revision or used 'go list -retracted'.
allowed = nil allowed = nil
} }
info, err := Query(ctx, path, vers, current, allowed) info, err := queryReuse(ctx, path, vers, current, allowed, reuse)
if err != nil { if err != nil {
var origin *codehost.Origin
if info != nil {
origin = info.Origin
}
mods = append(mods, &modinfo.ModulePublic{ mods = append(mods, &modinfo.ModulePublic{
Path: path, Path: path,
Version: vers, Version: vers,
Error: modinfoError(path, vers, err), Error: modinfoError(path, vers, err),
Origin: origin,
}) })
continue continue
} }
@ -171,7 +211,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// *Requirements instead. // *Requirements instead.
var noRS *Requirements var noRS *Requirements
mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode) mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse)
if vers != mod.Version {
mod.Query = vers
}
mod.Origin = info.Origin
mods = append(mods, mod) mods = append(mods, mod)
continue continue
} }
@ -200,7 +244,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
continue continue
} }
if v != "none" { if v != "none" {
mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode)) mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse))
} else if cfg.BuildMod == "vendor" { } else if cfg.BuildMod == "vendor" {
// In vendor mode, we can't determine whether a missing module is “a // In vendor mode, we can't determine whether a missing module is “a
// known dependency” because the module graph is incomplete. // known dependency” because the module graph is incomplete.
@ -229,7 +273,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
matched = true matched = true
if !matchedModule[m] { if !matchedModule[m] {
matchedModule[m] = true matchedModule[m] = true
mods = append(mods, moduleInfo(ctx, rs, m, mode)) mods = append(mods, moduleInfo(ctx, rs, m, mode, reuse))
} }
} }
} }

View file

@ -11,6 +11,7 @@ import (
"sort" "sort"
"cmd/go/internal/modfetch" "cmd/go/internal/modfetch"
"cmd/go/internal/modfetch/codehost"
"golang.org/x/mod/module" "golang.org/x/mod/module"
"golang.org/x/mod/semver" "golang.org/x/mod/semver"
@ -78,11 +79,10 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) {
return m, nil return m, nil
} }
func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string, error) { func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) {
// Note: modfetch.Lookup and repo.Versions are cached, // Note: modfetch.Lookup and repo.Versions are cached,
// so there's no need for us to add extra caching here. // so there's no need for us to add extra caching here.
var versions []string err = modfetch.TryProxies(func(proxy string) error {
err := modfetch.TryProxies(func(proxy string) error {
repo, err := lookupRepo(proxy, path) repo, err := lookupRepo(proxy, path)
if err != nil { if err != nil {
return err return err
@ -91,8 +91,8 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string,
if err != nil { if err != nil {
return err return err
} }
allowedVersions := make([]string, 0, len(allVersions)) allowedVersions := make([]string, 0, len(allVersions.List))
for _, v := range allVersions { for _, v := range allVersions.List {
if err := allowed(ctx, module.Version{Path: path, Version: v}); err == nil { if err := allowed(ctx, module.Version{Path: path, Version: v}); err == nil {
allowedVersions = append(allowedVersions, v) allowedVersions = append(allowedVersions, v)
} else if !errors.Is(err, ErrDisallowed) { } else if !errors.Is(err, ErrDisallowed) {
@ -100,9 +100,10 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string,
} }
} }
versions = allowedVersions versions = allowedVersions
origin = allVersions.Origin
return nil return nil
}) })
return versions, err return versions, origin, err
} }
// previousVersion returns the tagged version of m.Path immediately prior to // previousVersion returns the tagged version of m.Path immediately prior to
@ -117,7 +118,7 @@ func previousVersion(m module.Version) (module.Version, error) {
return module.Version{Path: m.Path, Version: "none"}, nil return module.Version{Path: m.Path, Version: "none"}, nil
} }
list, err := versions(context.TODO(), m.Path, CheckAllowed) list, _, err := versions(context.TODO(), m.Path, CheckAllowed)
if err != nil { if err != nil {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
return module.Version{Path: m.Path, Version: "none"}, nil return module.Version{Path: m.Path, Version: "none"}, nil

View file

@ -20,6 +20,8 @@ import (
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/imports" "cmd/go/internal/imports"
"cmd/go/internal/modfetch" "cmd/go/internal/modfetch"
"cmd/go/internal/modfetch/codehost"
"cmd/go/internal/modinfo"
"cmd/go/internal/search" "cmd/go/internal/search"
"cmd/go/internal/str" "cmd/go/internal/str"
"cmd/go/internal/trace" "cmd/go/internal/trace"
@ -72,18 +74,39 @@ import (
// //
// If path is the path of the main module and the query is "latest", // If path is the path of the main module and the query is "latest",
// Query returns Target.Version as the version. // Query returns Target.Version as the version.
//
// Query often returns a non-nil *RevInfo with a non-nil error,
// to provide an info.Origin that can allow the error to be cached.
func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) {
ctx, span := trace.StartSpan(ctx, "modload.Query "+path) ctx, span := trace.StartSpan(ctx, "modload.Query "+path)
defer span.Done() defer span.Done()
return queryReuse(ctx, path, query, current, allowed, nil)
}
// queryReuse is like Query but also takes a map of module info that can be reused
// if the validation criteria in Origin are met.
func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
var info *modfetch.RevInfo var info *modfetch.RevInfo
err := modfetch.TryProxies(func(proxy string) (err error) { err := modfetch.TryProxies(func(proxy string) (err error) {
info, err = queryProxy(ctx, proxy, path, query, current, allowed) info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse)
return err return err
}) })
return info, err return info, err
} }
// checkReuse checks whether a revision of a given module or a version list
// for a given module may be reused, according to the information in origin.
func checkReuse(ctx context.Context, path string, old *codehost.Origin) error {
return modfetch.TryProxies(func(proxy string) error {
repo, err := lookupRepo(proxy, path)
if err != nil {
return err
}
return repo.CheckReuse(old)
})
}
// AllowedFunc is used by Query and other functions to filter out unsuitable // AllowedFunc is used by Query and other functions to filter out unsuitable
// versions, for example, those listed in exclude directives in the main // versions, for example, those listed in exclude directives in the main
// module's go.mod file. // module's go.mod file.
@ -106,7 +129,7 @@ func (queryDisabledError) Error() string {
return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
} }
func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query) ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query)
defer span.Done() defer span.Done()
@ -137,6 +160,19 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return nil, err return nil, err
} }
if old := reuse[module.Version{Path: path, Version: query}]; old != nil {
if err := repo.CheckReuse(old.Origin); err == nil {
info := &modfetch.RevInfo{
Version: old.Version,
Origin: old.Origin,
}
if old.Time != nil {
info.Time = *old.Time
}
return info, nil
}
}
// Parse query to detect parse errors (and possibly handle query) // Parse query to detect parse errors (and possibly handle query)
// before any network I/O. // before any network I/O.
qm, err := newQueryMatcher(path, query, current, allowed) qm, err := newQueryMatcher(path, query, current, allowed)
@ -161,7 +197,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
} }
} }
if err != nil { if err != nil {
return nil, queryErr return info, queryErr
} }
} }
if err := allowed(ctx, module.Version{Path: path, Version: info.Version}); errors.Is(err, ErrDisallowed) { if err := allowed(ctx, module.Version{Path: path, Version: info.Version}); errors.Is(err, ErrDisallowed) {
@ -177,15 +213,23 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
if err != nil { if err != nil {
return nil, err return nil, err
} }
releases, prereleases, err := qm.filterVersions(ctx, versions) revErr := &modfetch.RevInfo{Origin: versions.Origin} // RevInfo to return with error
releases, prereleases, err := qm.filterVersions(ctx, versions.List)
if err != nil { if err != nil {
return nil, err return revErr, err
} }
lookup := func(v string) (*modfetch.RevInfo, error) { lookup := func(v string) (*modfetch.RevInfo, error) {
rev, err := repo.Stat(v) rev, err := repo.Stat(v)
// Stat can return a non-nil rev and a non-nil err,
// in order to provide origin information to make the error cacheable.
if rev == nil && err != nil {
return revErr, err
}
rev.Origin = mergeOrigin(rev.Origin, versions.Origin)
if err != nil { if err != nil {
return nil, err return rev, err
} }
if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() { if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() {
@ -210,9 +254,14 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
currentTime, err := module.PseudoVersionTime(current) currentTime, err := module.PseudoVersionTime(current)
if err == nil && rev.Time.Before(currentTime) { if err == nil && rev.Time.Before(currentTime) {
if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) { if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) {
return nil, err return revErr, err
} }
return repo.Stat(current) info, err := repo.Stat(current)
if info == nil && err != nil {
return revErr, err
}
info.Origin = mergeOrigin(info.Origin, versions.Origin)
return info, err
} }
} }
@ -242,7 +291,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return lookup(latest.Version) return lookup(latest.Version)
} }
} else if !errors.Is(err, fs.ErrNotExist) { } else if !errors.Is(err, fs.ErrNotExist) {
return nil, err return revErr, err
} }
} }
@ -254,7 +303,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return lookup(current) return lookup(current)
} }
return nil, &NoMatchingVersionError{query: query, current: current} return revErr, &NoMatchingVersionError{query: query, current: current}
} }
// IsRevisionQuery returns true if vers is a version query that may refer to // IsRevisionQuery returns true if vers is a version query that may refer to
@ -663,7 +712,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
pathCurrent := current(path) pathCurrent := current(path)
r.Mod.Path = path r.Mod.Path = path
r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed) r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil)
if err != nil { if err != nil {
return r, err return r, err
} }
@ -991,7 +1040,8 @@ func versionHasGoMod(_ context.Context, m module.Version) (bool, error) {
// available versions, but cannot fetch specific source files. // available versions, but cannot fetch specific source files.
type versionRepo interface { type versionRepo interface {
ModulePath() string ModulePath() string
Versions(prefix string) ([]string, error) CheckReuse(*codehost.Origin) error
Versions(prefix string) (*modfetch.Versions, error)
Stat(rev string) (*modfetch.RevInfo, error) Stat(rev string) (*modfetch.RevInfo, error)
Latest() (*modfetch.RevInfo, error) Latest() (*modfetch.RevInfo, error)
} }
@ -1023,8 +1073,13 @@ type emptyRepo struct {
var _ versionRepo = emptyRepo{} var _ versionRepo = emptyRepo{}
func (er emptyRepo) ModulePath() string { return er.path } func (er emptyRepo) ModulePath() string { return er.path }
func (er emptyRepo) Versions(prefix string) ([]string, error) { return nil, nil } func (er emptyRepo) CheckReuse(old *codehost.Origin) error {
return fmt.Errorf("empty repo")
}
func (er emptyRepo) Versions(prefix string) (*modfetch.Versions, error) {
return &modfetch.Versions{}, nil
}
func (er emptyRepo) Stat(rev string) (*modfetch.RevInfo, error) { return nil, er.err } func (er emptyRepo) Stat(rev string) (*modfetch.RevInfo, error) { return nil, er.err }
func (er emptyRepo) Latest() (*modfetch.RevInfo, error) { return nil, er.err } func (er emptyRepo) Latest() (*modfetch.RevInfo, error) { return nil, er.err }
@ -1042,15 +1097,22 @@ var _ versionRepo = (*replacementRepo)(nil)
func (rr *replacementRepo) ModulePath() string { return rr.repo.ModulePath() } func (rr *replacementRepo) ModulePath() string { return rr.repo.ModulePath() }
func (rr *replacementRepo) CheckReuse(old *codehost.Origin) error {
return fmt.Errorf("replacement repo")
}
// Versions returns the versions from rr.repo augmented with any matching // Versions returns the versions from rr.repo augmented with any matching
// replacement versions. // replacement versions.
func (rr *replacementRepo) Versions(prefix string) ([]string, error) { func (rr *replacementRepo) Versions(prefix string) (*modfetch.Versions, error) {
repoVersions, err := rr.repo.Versions(prefix) repoVersions, err := rr.repo.Versions(prefix)
if err != nil && !errors.Is(err, os.ErrNotExist) { if err != nil {
return nil, err if !errors.Is(err, os.ErrNotExist) {
return nil, err
}
repoVersions = new(modfetch.Versions)
} }
versions := repoVersions versions := repoVersions.List
for _, mm := range MainModules.Versions() { for _, mm := range MainModules.Versions() {
if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 { if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 {
path := rr.ModulePath() path := rr.ModulePath()
@ -1062,15 +1124,15 @@ func (rr *replacementRepo) Versions(prefix string) ([]string, error) {
} }
} }
if len(versions) == len(repoVersions) { // No replacement versions added. if len(versions) == len(repoVersions.List) { // replacement versions added
return versions, nil return repoVersions, nil
} }
sort.Slice(versions, func(i, j int) bool { sort.Slice(versions, func(i, j int) bool {
return semver.Compare(versions[i], versions[j]) < 0 return semver.Compare(versions[i], versions[j]) < 0
}) })
str.Uniq(&versions) str.Uniq(&versions)
return versions, nil return &modfetch.Versions{List: versions}, nil
} }
func (rr *replacementRepo) Stat(rev string) (*modfetch.RevInfo, error) { func (rr *replacementRepo) Stat(rev string) (*modfetch.RevInfo, error) {

View file

@ -216,21 +216,20 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
// is the module's root directory on disk, index is the modindex.Module for the // is the module's root directory on disk, index is the modindex.Module for the
// module, and importPathRoot is the module's path prefix. // module, and importPathRoot is the module's path prefix.
func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) { func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
loopPackages: index.Walk(func(reldir string) {
for _, reldir := range index.Packages() {
// Avoid .foo, _foo, and testdata subdirectory trees. // Avoid .foo, _foo, and testdata subdirectory trees.
p := reldir p := reldir
for { for {
elem, rest, found := strings.Cut(p, string(filepath.Separator)) elem, rest, found := strings.Cut(p, string(filepath.Separator))
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
continue loopPackages return
} }
if found && elem == "vendor" { if found && elem == "vendor" {
// Ignore this path if it contains the element "vendor" anywhere // Ignore this path if it contains the element "vendor" anywhere
// except for the last element (packages named vendor are allowed // except for the last element (packages named vendor are allowed
// for historical reasons). Note that found is true when this // for historical reasons). Note that found is true when this
// isn't the last path element. // isn't the last path element.
continue loopPackages return
} }
if !found { if !found {
// Didn't find the separator, so we're considering the last element. // Didn't find the separator, so we're considering the last element.
@ -241,12 +240,12 @@ loopPackages:
// Don't use GOROOT/src. // Don't use GOROOT/src.
if reldir == "" && importPathRoot == "" { if reldir == "" && importPathRoot == "" {
continue return
} }
name := path.Join(importPathRoot, filepath.ToSlash(reldir)) name := path.Join(importPathRoot, filepath.ToSlash(reldir))
if !treeCanMatch(name) { if !treeCanMatch(name) {
continue return
} }
if !have[name] { if !have[name] {
@ -257,7 +256,7 @@ loopPackages:
} }
} }
} }
} })
} }
// MatchInModule identifies the packages matching the given pattern within the // MatchInModule identifies the packages matching the given pattern within the

View file

@ -151,11 +151,10 @@ and test commands:
For example, when building with a non-standard configuration, For example, when building with a non-standard configuration,
use -pkgdir to keep generated packages in a separate location. use -pkgdir to keep generated packages in a separate location.
-tags tag,list -tags tag,list
a comma-separated list of build tags to consider satisfied during the a comma-separated list of additional build tags to consider satisfied
build. For more information about build tags, see the description of during the build. For more information about build tags, see
build constraints in the documentation for the go/build package. 'go help buildconstraint'. (Earlier versions of Go used a
(Earlier versions of Go used a space-separated list, and that form space-separated list, and that form is deprecated but still recognized.)
is deprecated but still recognized.)
-trimpath -trimpath
remove all file system paths from the resulting executable. remove all file system paths from the resulting executable.
Instead of absolute file system paths, the recorded file names Instead of absolute file system paths, the recorded file names

View file

@ -211,7 +211,11 @@ func buildModeInit() {
codegenArg = "-shared" codegenArg = "-shared"
ldBuildmode = "pie" ldBuildmode = "pie"
case "windows": case "windows":
ldBuildmode = "pie" if cfg.BuildRace {
ldBuildmode = "exe"
} else {
ldBuildmode = "pie"
}
case "ios": case "ios":
codegenArg = "-shared" codegenArg = "-shared"
ldBuildmode = "pie" ldBuildmode = "pie"

View file

@ -163,7 +163,7 @@ func (ts *testScript) setup() {
ts.cd = filepath.Join(ts.workdir, "gopath/src") ts.cd = filepath.Join(ts.workdir, "gopath/src")
ts.env = []string{ ts.env = []string{
"WORK=" + ts.workdir, // must be first for ts.abbrev "WORK=" + ts.workdir, // must be first for ts.abbrev
"PATH=" + testBin + string(filepath.ListSeparator) + os.Getenv("PATH"), pathEnvName() + "=" + testBin + string(filepath.ListSeparator) + os.Getenv(pathEnvName()),
homeEnvName() + "=/no-home", homeEnvName() + "=/no-home",
"CCACHE_DISABLE=1", // ccache breaks with non-existent HOME "CCACHE_DISABLE=1", // ccache breaks with non-existent HOME
"GOARCH=" + runtime.GOARCH, "GOARCH=" + runtime.GOARCH,
@ -187,8 +187,6 @@ func (ts *testScript) setup() {
tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"), tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"),
"devnull=" + os.DevNull, "devnull=" + os.DevNull,
"goversion=" + goVersion(ts), "goversion=" + goVersion(ts),
":=" + string(os.PathListSeparator),
"/=" + string(os.PathSeparator),
"CMDGO_TEST_RUN_MAIN=true", "CMDGO_TEST_RUN_MAIN=true",
} }
if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" { if testenv.Builder() != "" || os.Getenv("GIT_TRACE_CURL") == "1" {
@ -203,10 +201,6 @@ func (ts *testScript) setup() {
ts.env = append(ts.env, "TESTGONETWORK=panic", "TESTGOVCS=panic") ts.env = append(ts.env, "TESTGONETWORK=panic", "TESTGOVCS=panic")
} }
if runtime.GOOS == "plan9" {
ts.env = append(ts.env, "path="+testBin+string(filepath.ListSeparator)+os.Getenv("path"))
}
for _, key := range extraEnvKeys { for _, key := range extraEnvKeys {
if val := os.Getenv(key); val != "" { if val := os.Getenv(key); val != "" {
ts.env = append(ts.env, key+"="+val) ts.env = append(ts.env, key+"="+val)
@ -219,6 +213,10 @@ func (ts *testScript) setup() {
ts.envMap[kv[:i]] = kv[i+1:] ts.envMap[kv[:i]] = kv[i+1:]
} }
} }
// Add entries for ${:} and ${/} to make it easier to write platform-independent
// environment variables.
ts.envMap["/"] = string(os.PathSeparator)
ts.envMap[":"] = string(os.PathListSeparator)
fmt.Fprintf(&ts.log, "# (%s)\n", time.Now().UTC().Format(time.RFC3339)) fmt.Fprintf(&ts.log, "# (%s)\n", time.Now().UTC().Format(time.RFC3339))
ts.mark = ts.log.Len() ts.mark = ts.log.Len()
@ -1264,12 +1262,7 @@ func (ts *testScript) lookPath(command string) (string, error) {
} }
} }
pathName := "PATH" for _, dir := range strings.Split(ts.envMap[pathEnvName()], string(filepath.ListSeparator)) {
if runtime.GOOS == "plan9" {
pathName = "path"
}
for _, dir := range strings.Split(ts.envMap[pathName], string(filepath.ListSeparator)) {
if searchExt { if searchExt {
ents, err := os.ReadDir(dir) ents, err := os.ReadDir(dir)
if err != nil { if err != nil {

View file

@ -0,0 +1,9 @@
-- .mod --
module example.com/retract/noupgrade
go 1.19
retract v1.0.0 // bad
-- .info --
{"Version":"v1.0.0"}

View file

@ -41,12 +41,19 @@ Scripts also have access to these other environment variables:
GODEBUG=<actual GODEBUG> GODEBUG=<actual GODEBUG>
devnull=<value of os.DevNull> devnull=<value of os.DevNull>
goversion=<current Go version; for example, 1.12> goversion=<current Go version; for example, 1.12>
:=<OS-specific path list separator>
The scripts' supporting files are unpacked relative to $GOPATH/src (aka $WORK/gopath/src) On Plan 9, the variables $path and $home are set instead of $PATH and $HOME.
and then the script begins execution in that directory as well. Thus the example above runs On Windows, the variables $USERPROFILE and $TMP are set instead of
in $WORK/gopath/src with GOPATH=$WORK/gopath and $WORK/gopath/src/hello.go $HOME and $TMPDIR.
containing the listed contents.
In addition, variables named ':' and '/' are expanded within script arguments
(expanding to the value of os.PathListSeparator and os.PathSeparator
respectively) but are not inherited in subprocess environments.
The scripts' supporting files are unpacked relative to $GOPATH/src
(aka $WORK/gopath/src) and then the script begins execution in that directory as
well. Thus the example above runs in $WORK/gopath/src with GOPATH=$WORK/gopath
and $WORK/gopath/src/hello.go containing the listed contents.
The lines at the top of the script are a sequence of commands to be executed The lines at the top of the script are a sequence of commands to be executed
by a tiny script engine in ../../script_test.go (not the system shell). by a tiny script engine in ../../script_test.go (not the system shell).

View file

@ -6,11 +6,15 @@
cd sub cd sub
exec git init . exec git init .
exec git config user.name 'Nameless Gopher'
exec git config user.email 'nobody@golang.org'
exec git add sub.go exec git add sub.go
exec git commit -m 'initial state' exec git commit -m 'initial state'
cd .. cd ..
exec git init exec git init
exec git config user.name 'Nameless Gopher'
exec git config user.email 'nobody@golang.org'
exec git submodule add ./sub exec git submodule add ./sub
exec git add go.mod example.go exec git add go.mod example.go
exec git commit -m 'initial state' exec git commit -m 'initial state'

View file

@ -47,20 +47,24 @@ go build -overlay overlay.json -o main_call_asm$GOEXE ./call_asm
exec ./main_call_asm$GOEXE exec ./main_call_asm$GOEXE
! stdout . ! stdout .
# Change the contents of a file in the overlay and ensure that makes the target stale
go install -overlay overlay.json ./test_cache
go list -overlay overlay.json -f '{{.Stale}}' ./test_cache
stdout '^false$'
cp overlay/test_cache_different.go overlay/test_cache.go
go list -overlay overlay.json -f '{{.Stale}}' ./test_cache
stdout '^true$'
[cgo] go list -compiled -overlay overlay.json -f '{{range .CompiledGoFiles}}{{. | printf "%s\n"}}{{end}}' ./cgo_hello_replace [cgo] go list -compiled -overlay overlay.json -f '{{range .CompiledGoFiles}}{{. | printf "%s\n"}}{{end}}' ./cgo_hello_replace
[cgo] cp stdout compiled_cgo_sources.txt [cgo] cp stdout compiled_cgo_sources.txt
[cgo] go run ../print_line_comments.go compiled_cgo_sources.txt [cgo] go run ../print_line_comments.go compiled_cgo_sources.txt
[cgo] stdout $GOPATH[/\\]src[/\\]m[/\\]cgo_hello_replace[/\\]cgo_hello_replace.go [cgo] stdout $GOPATH[/\\]src[/\\]m[/\\]cgo_hello_replace[/\\]cgo_hello_replace.go
[cgo] ! stdout $GOPATH[/\\]src[/\\]m[/\\]overlay[/\\]hello.c [cgo] ! stdout $GOPATH[/\\]src[/\\]m[/\\]overlay[/\\]hello.c
# Change the contents of a file in the overlay and ensure that makes the target stale
env OLD_GOCACHE=$GOCACHE
env GOCACHE=$WORK/cache # use a fresh cache so that multiple runs of the test don't interfere
go build -x -overlay overlay.json ./test_cache
stderr '(compile|gccgo)( |\.exe).*test_cache.go'
go build -x -overlay overlay.json ./test_cache
! stderr '(compile|gccgo)( |\.exe).*test_cache.go' # cached
cp overlay/test_cache_different.go overlay/test_cache.go
go build -x -overlay overlay.json ./test_cache
stderr '(compile|gccgo)( |\.exe).*test_cache.go' # not cached
env CACHE=$OLD_GOCACHE
# Run same tests but with gccgo. # Run same tests but with gccgo.
env GO111MODULE=off env GO111MODULE=off
[!exec:gccgo] stop [!exec:gccgo] stop

View file

@ -0,0 +1,6 @@
# Test that go list prefix... does not read directories not beginning with prefix.
env GODEBUG=gofsystrace=1
go list m...
stderr mime
stderr mime[\\/]multipart
! stderr archive

View file

@ -0,0 +1,22 @@
[!net] skip
[!exec:git] skip
env GO111MODULE=off
cd rsc.io/go-get-issue-10952
exec git init
exec git add foo.go
exec git config user.name Gopher
exec git config user.email gopher@golang.org
exec git commit -a -m 'initial commit'
exec git remote add origin https://github.com/golang/go-get-issue-10952
exec git status
! go get -x -u rsc.io/go-get-issue-10952
stderr '^package rsc.io/go-get-issue-10952: rsc\.io/go-get-issue-10952 is a custom import path for https://github.com/rsc/go-get-issue-10952, but .* is checked out from https://github.com/golang/go-get-issue-10952$'
-- rsc.io/go-get-issue-10952/foo.go --
// Junk package to test go get.
package foo

6
src/cmd/go/testdata/script/index.txt vendored Normal file
View file

@ -0,0 +1,6 @@
# Check that standard library packages are cached.
go list -json math # refresh cache
env GODEBUG=gofsystrace=1,gofsystracelog=fsys.log
go list -json math
! grep math/abs.go fsys.log
grep 'openIndexPackage .*[\\/]math$' fsys.log

View file

@ -11,12 +11,11 @@ stdout '^example.com/noread$'
go list ./empty/... go list ./empty/...
stderr 'matched no packages' stderr 'matched no packages'
[root] stop # Root typically ignores file permissions.
# Make the directory ./noread unreadable, and verify that 'go list' reports an # Make the directory ./noread unreadable, and verify that 'go list' reports an
# explicit error for a pattern that should match it (rather than treating it as # explicit error for a pattern that should match it (rather than treating it as
# equivalent to an empty directory). # equivalent to an empty directory).
[root] stop # Root typically ignores file permissions.
[windows] skip # Does not have Unix-style directory permissions. [windows] skip # Does not have Unix-style directory permissions.
[plan9] skip # Might not have Unix-style directory permissions. [plan9] skip # Might not have Unix-style directory permissions.

View file

@ -0,0 +1,21 @@
[short] skip
[!exec:git] skip
[!net] skip
[!linux] skip # Uses XDG_CONFIG_HOME
env GIT_CONFIG_GLOBAL=$WORK/.gitconfig
env GOPROXY=direct
! go mod download
stderr '^go: github\.com/golang/notexist/subdir@v0.1.0: reading github\.com/golang/notexist/subdir/go\.mod at revision subdir/v0\.1\.0: '
-- go.mod --
module test
go 1.18
require github.com/golang/notexist/subdir v0.1.0
-- $WORK/.gitconfig --
[url "git@github.com:"]
insteadOf = https://github.com/

View file

@ -22,22 +22,18 @@ stderr '^If this is a private repository, see https://golang.org/doc/faq#git_htt
! stderr 'unknown revision' ! stderr 'unknown revision'
! stdout . ! stdout .
[!linux] stop # Needs XDG_CONFIG_HOME.
[!exec:false] stop [!exec:false] stop
# Test that Git clone errors will be shown to the user instead of a generic # Test that Git clone errors will be shown to the user instead of a generic
# "unknown revision" error. To do this we want to force git ls-remote to return # "unknown revision" error. To do this we want to force git ls-remote to return
# an error we don't already have special handling for. See golang/go#42751. # an error we don't already have special handling for. See golang/go#42751.
# env HOME=$WORK${/}home${/}gopher
# Set XDG_CONFIG_HOME to tell Git where to look for the git config file listed
# below, which turns on ssh.
env XDG_CONFIG_HOME=$TMPDIR
env GIT_SSH_COMMAND=false env GIT_SSH_COMMAND=false
! go install github.com/golang/nonexist@master ! go install github.com/golang/nonexist@master
stderr 'fatal: Could not read from remote repository.' stderr 'fatal: Could not read from remote repository.'
! stderr 'unknown revision' ! stderr 'unknown revision'
! stdout . ! stdout .
-- $TMPDIR/git/config -- -- $WORK/home/gopher/.gitconfig --
[url "git@github.com:"] [url "git@github.com:"]
insteadOf = https://github.com/ insteadOf = https://github.com/

23
src/cmd/go/testdata/script/mod_perm.txt vendored Normal file
View file

@ -0,0 +1,23 @@
# go list should work in ordinary conditions.
go list ./...
! stdout _data
# skip in conditions where chmod 0 may not work.
# plan9 should be fine, but copied from list_perm.txt unchanged.
[root] skip
[windows] skip
[plan9] skip
# go list should work with unreadable _data directory.
chmod 0 _data
go list ./...
! stdout _data
-- go.mod --
module m
-- x.go --
package m
-- _data/x.go --
package p

View file

@ -0,0 +1,11 @@
go list -m -u example.com/retract/noupgrade
stdout '^example.com/retract/noupgrade v1.0.0 \(retracted\)$'
-- go.mod --
module use
go 1.19
require example.com/retract/noupgrade v1.0.0
-- go.sum --
example.com/retract/noupgrade v1.0.0/go.mod h1:q2/HnBejUQ83RcUo4stf2U++/Zr9R/Ky3BsodjKBkQ4=

425
src/cmd/go/testdata/script/reuse_git.txt vendored Normal file
View file

@ -0,0 +1,425 @@
[short] skip
[!exec:git] skip
[!net] skip
env GO111MODULE=on
env GOPROXY=direct
env GOSUMDB=off
# go mod download with the pseudo-version should invoke git but not have a TagSum or Ref.
go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c
stderr 'git fetch'
cp stdout hellopseudo.json
! stdout '"(Query|TagPrefix|TagSum|Ref)"'
stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/hello"'
stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
go clean -modcache
# go mod download vcstest/hello should invoke git, print origin info
go mod download -x -json vcs-test.golang.org/git/hello.git@latest
stderr 'git fetch'
cp stdout hello.json
stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/hello"'
stdout '"Query": "latest"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
stdout '"Ref": "HEAD"'
stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
# pseudo-version again should not invoke git fetch (it has the version from the @latest query)
# but still be careful not to include a TagSum or a Ref, especially not Ref set to HEAD,
# which is easy to do when reusing the cached version from the @latest query.
go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c
! stderr 'git fetch'
cp stdout hellopseudo2.json
cmp hellopseudo.json hellopseudo2.json
# go mod download vcstest/hello@hash needs to check TagSum to find pseudoversion base.
go mod download -x -json vcs-test.golang.org/git/hello.git@fc3a09f3dc5c
! stderr 'git fetch'
cp stdout hellohash.json
stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
stdout '"Query": "fc3a09f3dc5c"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/hello"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
# go mod download vcstest/hello/v9 should fail, still print origin info
! go mod download -x -json vcs-test.golang.org/git/hello.git/v9@latest
cp stdout hellov9.json
stdout '"Version": "latest"'
stdout '"Error":.*no matching versions'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
! stdout '"(Ref|Hash|RepoSum)":'
# go mod download vcstest/hello/sub/v9 should also fail, print origin info with TagPrefix
! go mod download -x -json vcs-test.golang.org/git/hello.git/sub/v9@latest
cp stdout hellosubv9.json
stdout '"Version": "latest"'
stdout '"Error":.*no matching versions'
stdout '"TagPrefix": "sub/"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
! stdout '"(Ref|Hash|RepoSum)":'
# go mod download vcstest/hello@nonexist should fail, still print origin info
! go mod download -x -json vcs-test.golang.org/git/hello.git@nonexist
cp stdout hellononexist.json
stdout '"Version": "nonexist"'
stdout '"Error":.*unknown revision nonexist'
stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
# go mod download vcstest/hello@1234567890123456789012345678901234567890 should fail, still print origin info
# (40 hex digits is assumed to be a full hash and is a slightly different code path from @nonexist)
! go mod download -x -json vcs-test.golang.org/git/hello.git@1234567890123456789012345678901234567890
cp stdout hellononhash.json
stdout '"Version": "1234567890123456789012345678901234567890"'
stdout '"Error":.*unknown revision 1234567890123456789012345678901234567890'
stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
# go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc should fail, still print origin info
# (non-existent pseudoversion)
! go mod download -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20220101120101-123456789abc
cp stdout hellononpseudo.json
stdout '"Version": "v0.0.0-20220101120101-123456789abc"'
stdout '"Error":.*unknown revision 123456789abc'
stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
# go mod download vcstest/tagtests should invoke git, print origin info
go mod download -x -json vcs-test.golang.org/git/tagtests.git@latest
stderr 'git fetch'
cp stdout tagtests.json
stdout '"Version": "v0.2.2"'
stdout '"Query": "latest"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
stdout '"Ref": "refs/tags/v0.2.2"'
stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
# go mod download vcstest/tagtests@v0.2.2 should print origin info, no TagSum needed
go mod download -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
cp stdout tagtestsv022.json
stdout '"Version": "v0.2.2"'
! stdout '"Query":'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
! stdout '"TagSum"'
stdout '"Ref": "refs/tags/v0.2.2"'
stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
# go mod download vcstest/tagtests@master needs a TagSum again
go mod download -x -json vcs-test.golang.org/git/tagtests.git@master
cp stdout tagtestsmaster.json
stdout '"Version": "v0.2.3-0.20190509225625-c7818c24fa2f"'
stdout '"Query": "master"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
stdout '"Ref": "refs/heads/master"'
stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"'
# go mod download vcstest/prefixtagtests should invoke git, print origin info
go mod download -x -json vcs-test.golang.org/git/prefixtagtests.git/sub@latest
stderr 'git fetch'
cp stdout prefixtagtests.json
stdout '"Version": "v0.0.10"'
stdout '"Query": "latest"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/prefixtagtests"'
stdout '"Subdir": "sub"'
stdout '"TagPrefix": "sub/"'
stdout '"TagSum": "t1:YGSbWkJ8dn9ORAr[+]BlKHFK/2ZhXLb9hVuYfTZ9D8C7g="'
stdout '"Ref": "refs/tags/sub/v0.0.10"'
stdout '"Hash": "2b7c4692e12c109263cab51b416fcc835ddd7eae"'
# go mod download of a bunch of these should fail (some are invalid) but write good JSON for later
! go mod download -json vcs-test.golang.org/git/hello.git@latest vcs-test.golang.org/git/hello.git/v9@latest vcs-test.golang.org/git/hello.git/sub/v9@latest vcs-test.golang.org/git/tagtests.git@latest vcs-test.golang.org/git/tagtests.git@v0.2.2 vcs-test.golang.org/git/tagtests.git@master
cp stdout all.json
# clean the module cache, make sure that makes go mod download re-run git fetch, clean again
go clean -modcache
go mod download -x -json vcs-test.golang.org/git/hello.git@latest
stderr 'git fetch'
go clean -modcache
# reuse go mod download vcstest/hello result
go mod download -reuse=hello.json -x -json vcs-test.golang.org/git/hello.git@latest
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/hello"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
stdout '"Ref": "HEAD"'
stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
! stdout '"Dir"'
! stdout '"Info"'
! stdout '"GoMod"'
! stdout '"Zip"'
# reuse go mod download vcstest/hello pseudoversion result
go mod download -reuse=hellopseudo.json -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20170922010558-fc3a09f3dc5c
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/hello"'
! stdout '"(Query|TagPrefix|TagSum|Ref)"'
stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/hello@hash
go mod download -reuse=hellohash.json -x -json vcs-test.golang.org/git/hello.git@fc3a09f3dc5c
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Query": "fc3a09f3dc5c"'
stdout '"Version": "v0.0.0-20170922010558-fc3a09f3dc5c"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/hello"'
! stdout '"(TagPrefix|Ref)"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
stdout '"Hash": "fc3a09f3dc5cfe0d7a743ea18f1f5226e68b3777"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/hello/v9 error result
! go mod download -reuse=hellov9.json -x -json vcs-test.golang.org/git/hello.git/v9@latest
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Error":.*no matching versions'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
! stdout '"(Ref|Hash)":'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/hello/sub/v9 error result
! go mod download -reuse=hellosubv9.json -x -json vcs-test.golang.org/git/hello.git/sub/v9@latest
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Error":.*no matching versions'
stdout '"TagPrefix": "sub/"'
stdout '"TagSum": "t1:47DEQpj8HBSa[+]/TImW[+]5JCeuQeRkm5NMpJWZG3hSuFU="'
! stdout '"(Ref|Hash)":'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/hello@nonexist
! go mod download -reuse=hellononexist.json -x -json vcs-test.golang.org/git/hello.git@nonexist
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "nonexist"'
stdout '"Error":.*unknown revision nonexist'
stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/hello@1234567890123456789012345678901234567890
! go mod download -reuse=hellononhash.json -x -json vcs-test.golang.org/git/hello.git@1234567890123456789012345678901234567890
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "1234567890123456789012345678901234567890"'
stdout '"Error":.*unknown revision 1234567890123456789012345678901234567890'
stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/hello@v0.0.0-20220101120101-123456789abc
! go mod download -reuse=hellononpseudo.json -x -json vcs-test.golang.org/git/hello.git@v0.0.0-20220101120101-123456789abc
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.0.0-20220101120101-123456789abc"'
stdout '"Error":.*unknown revision 123456789abc'
stdout '"RepoSum": "r1:c0/9JCZ25lxoBiK3[+]3BhACU4giH49flcJmBynJ[+]Jvmc="'
! stdout '"(TagPrefix|TagSum|Ref|Hash)"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/tagtests result
go mod download -reuse=tagtests.json -x -json vcs-test.golang.org/git/tagtests.git@latest
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.2.2"'
stdout '"Query": "latest"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
stdout '"Ref": "refs/tags/v0.2.2"'
stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/tagtests@v0.2.2 result
go mod download -reuse=tagtestsv022.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.2.2"'
! stdout '"Query":'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
! stdout '"TagSum"'
stdout '"Ref": "refs/tags/v0.2.2"'
stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/tagtests@master result
go mod download -reuse=tagtestsmaster.json -x -json vcs-test.golang.org/git/tagtests.git@master
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.2.3-0.20190509225625-c7818c24fa2f"'
stdout '"Query": "master"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
stdout '"Ref": "refs/heads/master"'
stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse go mod download vcstest/tagtests@master result again with all.json
go mod download -reuse=all.json -x -json vcs-test.golang.org/git/tagtests.git@master
! stderr 'git fetch'
stdout '"Reuse": true'
stdout '"Version": "v0.2.3-0.20190509225625-c7818c24fa2f"'
stdout '"Query": "master"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"TagPrefix"'
stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
stdout '"Ref": "refs/heads/master"'
stdout '"Hash": "c7818c24fa2f3f714c67d0a6d3e411c85a518d1f"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# go mod download vcstest/prefixtagtests result with json
go mod download -reuse=prefixtagtests.json -x -json vcs-test.golang.org/git/prefixtagtests.git/sub@latest
! stderr 'git fetch'
stdout '"Version": "v0.0.10"'
stdout '"Query": "latest"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/prefixtagtests"'
stdout '"Subdir": "sub"'
stdout '"TagPrefix": "sub/"'
stdout '"TagSum": "t1:YGSbWkJ8dn9ORAr[+]BlKHFK/2ZhXLb9hVuYfTZ9D8C7g="'
stdout '"Ref": "refs/tags/sub/v0.0.10"'
stdout '"Hash": "2b7c4692e12c109263cab51b416fcc835ddd7eae"'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse the bulk results with all.json
! go mod download -reuse=all.json -json vcs-test.golang.org/git/hello.git@latest vcs-test.golang.org/git/hello.git/v9@latest vcs-test.golang.org/git/hello.git/sub/v9@latest vcs-test.golang.org/git/tagtests.git@latest vcs-test.golang.org/git/tagtests.git@v0.2.2 vcs-test.golang.org/git/tagtests.git@master
! stderr 'git fetch'
stdout '"Reuse": true'
! stdout '"(Dir|Info|GoMod|Zip)"'
# reuse attempt with stale hash should reinvoke git, not report reuse
go mod download -reuse=tagtestsv022badhash.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
stderr 'git fetch'
! stdout '"Reuse": true'
stdout '"Version": "v0.2.2"'
! stdout '"Query"'
stdout '"VCS": "git"'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
! stdout '"(TagPrefix|TagSum)"'
stdout '"Ref": "refs/tags/v0.2.2"'
stdout '"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"'
stdout '"Dir"'
stdout '"Info"'
stdout '"GoMod"'
stdout '"Zip"'
# reuse with stale repo URL
go mod download -reuse=tagtestsv022badurl.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
! stdout '"Reuse": true'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
stdout '"Dir"'
stdout '"Info"'
stdout '"GoMod"'
stdout '"Zip"'
# reuse with stale VCS
go mod download -reuse=tagtestsv022badvcs.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
! stdout '"Reuse": true'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
# reuse with stale Dir
go mod download -reuse=tagtestsv022baddir.json -x -json vcs-test.golang.org/git/tagtests.git@v0.2.2
! stdout '"Reuse": true'
stdout '"URL": "https://vcs-test.golang.org/git/tagtests"'
# reuse with stale TagSum
go mod download -reuse=tagtestsbadtagsum.json -x -json vcs-test.golang.org/git/tagtests.git@latest
! stdout '"Reuse": true'
stdout '"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo="'
-- tagtestsv022badhash.json --
{
"Path": "vcs-test.golang.org/git/tagtests.git",
"Version": "v0.2.2",
"Origin": {
"VCS": "git",
"URL": "https://vcs-test.golang.org/git/tagtests",
"Ref": "refs/tags/v0.2.2",
"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952XXX"
}
}
-- tagtestsbadtagsum.json --
{
"Path": "vcs-test.golang.org/git/tagtests.git",
"Version": "v0.2.2",
"Query": "latest",
"Origin": {
"VCS": "git",
"URL": "https://vcs-test.golang.org/git/tagtests",
"TagSum": "t1:Dp7yRKDuE8WjG0429PN9hYWjqhy2te7P9Oki/sMEOGo=XXX",
"Ref": "refs/tags/v0.2.2",
"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
},
"Reuse": true
}
-- tagtestsv022badvcs.json --
{
"Path": "vcs-test.golang.org/git/tagtests.git",
"Version": "v0.2.2",
"Origin": {
"VCS": "gitXXX",
"URL": "https://vcs-test.golang.org/git/tagtests",
"Ref": "refs/tags/v0.2.2",
"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
}
}
-- tagtestsv022baddir.json --
{
"Path": "vcs-test.golang.org/git/tagtests.git",
"Version": "v0.2.2",
"Origin": {
"VCS": "git",
"URL": "https://vcs-test.golang.org/git/tagtests",
"Subdir": "subdir",
"Ref": "refs/tags/v0.2.2",
"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
}
}
-- tagtestsv022badurl.json --
{
"Path": "vcs-test.golang.org/git/tagtests.git",
"Version": "v0.2.2",
"Origin": {
"VCS": "git",
"URL": "https://vcs-test.golang.org/git/tagtestsXXX",
"Ref": "refs/tags/v0.2.2",
"Hash": "59356c8cd18c5fe9a598167d98a6843e52d57952"
}
}

View file

@ -17,15 +17,12 @@ go clean -cache
exists $GOCACHE/fuzz exists $GOCACHE/fuzz
# 'go clean -fuzzcache' should delete the fuzz cache but not the build cache. # 'go clean -fuzzcache' should delete the fuzz cache but not the build cache.
go list -f {{.Stale}} ./empty go build -x ./empty
stdout true stderr '(compile|gccgo)( |\.exe).*empty.go'
go install ./empty
go list -f {{.Stale}} ./empty
stdout false
go clean -fuzzcache go clean -fuzzcache
! exists $GOCACHE/fuzz ! exists $GOCACHE/fuzz
go list -f {{.Stale}} ./empty go build -x ./empty
stdout false ! stderr '(compile|gccgo)( |\.exe).*empty.go'
# Fuzzing indicates that one new interesting value was found with an empty # Fuzzing indicates that one new interesting value was found with an empty
# corpus, and the total size of the cache is now 1. # corpus, and the total size of the cache is now 1.

View file

@ -0,0 +1,59 @@
go work init
go work use . ./sub
# Verify that the go.mod files for both modules in the workspace are tidy,
# and add missing go.sum entries as needed.
cp go.mod go.mod.orig
go mod tidy
cmp go.mod go.mod.orig
cd sub
cp go.mod go.mod.orig
go mod tidy
cmp go.mod go.mod.orig
cd ..
go list -m all
stdout '^rsc\.io/quote v1\.5\.1$'
stdout '^rsc\.io/sampler v1\.3\.1$'
# Now remove the module dependencies from the module cache.
# Because one module upgrades a transitive dependency needed by another,
# listing the modules in the workspace should error out.
go clean -modcache
env GOPROXY=off
! go list -m all
stderr '^go: rsc.io/sampler@v1.3.0: module lookup disabled by GOPROXY=off$'
-- example.go --
package example
import _ "rsc.io/sampler"
-- go.mod --
module example
go 1.19
require rsc.io/sampler v1.3.0
require (
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
rsc.io/testonly v1.0.0 // indirect
)
-- sub/go.mod --
module example/sub
go 1.19
require rsc.io/quote v1.5.1
require (
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect
rsc.io/sampler v1.3.1 // indirect
)
-- sub/sub.go --
package example
import _ "rsc.io/quote"

View file

@ -7,13 +7,19 @@ exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
grep '^rsc\.io/quote v1\.5\.2/go\.mod h1:' go.work.sum
grep '^rsc\.io/quote v1\.5\.2 h1:' go.work.sum
go clean -modcache
rm go.work.sum
go mod download go mod download
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod ! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
grep '^rsc\.io/quote v1\.5\.2/go\.mod h1:' go.work.sum
grep '^rsc\.io/quote v1\.5\.2 h1:' go.work.sum
go mod why rsc.io/quote go mod why rsc.io/quote
stdout '# rsc.io/quote\nexample.com/a\nrsc.io/quote' stdout '# rsc.io/quote\nexample.com/a\nrsc.io/quote'
@ -25,8 +31,8 @@ stdout 'example.com/a rsc.io/quote@v1.5.2\nexample.com/b example.com/c@v1.0.0\nr
go 1.18 go 1.18
use ( use (
./a ./a
./b ./b
) )
-- a/go.mod -- -- a/go.mod --
go 1.18 go 1.18

View file

@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
// SHA256 block routine. See sha256block.go for Go equivalent. // SHA256 block routine. See sha256block.go for Go equivalent.
// //
// The algorithm is detailed in FIPS 180-4: // The algorithm is detailed in FIPS 180-4:

View file

@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package notsha256 package notsha256
var useAVX2 = false var useAVX2 = false

View file

@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
#include "textflag.h" #include "textflag.h"
// SHA256 block routine. See sha256block.go for Go equivalent. // SHA256 block routine. See sha256block.go for Go equivalent.

View file

@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build 386 || amd64 || ppc64le || ppc64 //go:build !purego && (386 || amd64 || ppc64le || ppc64)
// +build !purego
// +build 386 amd64 ppc64le ppc64 // +build 386 amd64 ppc64le ppc64
package notsha256 package notsha256

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !amd64 && !386 && !ppc64le && !ppc64 //go:build purego || (!amd64 && !386 && !ppc64le && !ppc64)
// +build !amd64,!386,!ppc64le,!ppc64 // +build purego !amd64,!386,!ppc64le,!ppc64
package notsha256 package notsha256

View file

@ -8,7 +8,8 @@
// bootstrap toolchain. // bootstrap toolchain.
// //
//go:build ppc64 || ppc64le //go:build !purego && (ppc64 || ppc64le)
// +build !purego
// +build ppc64 ppc64le // +build ppc64 ppc64le
// Based on CRYPTOGAMS code with the following comment: // Based on CRYPTOGAMS code with the following comment:

View file

@ -1426,10 +1426,23 @@ func (ctxt *Link) hostlink() {
argv = append(argv, "-Wl,-pagezero_size,4000000") argv = append(argv, "-Wl,-pagezero_size,4000000")
} }
} }
if *flagRace && ctxt.HeadType == objabi.Hwindows {
// Current windows/amd64 race detector tsan support
// library can't handle PIE mode (see #53539 for more details).
// For now, explicitly disable PIE (since some compilers
// default to it) if -race is in effect.
argv = addASLRargs(argv, false)
}
case BuildModePIE: case BuildModePIE:
switch ctxt.HeadType { switch ctxt.HeadType {
case objabi.Hdarwin, objabi.Haix: case objabi.Hdarwin, objabi.Haix:
case objabi.Hwindows: case objabi.Hwindows:
if *flagAslr && *flagRace {
// Current windows/amd64 race detector tsan support
// library can't handle PIE mode (see #53539 for more details).
// Disable alsr if -race in effect.
*flagAslr = false
}
argv = addASLRargs(argv, *flagAslr) argv = addASLRargs(argv, *flagAslr)
default: default:
// ELF. // ELF.

View file

@ -250,23 +250,14 @@ func testGoLib(t *testing.T, iscgo bool) {
t.Fatal(err) t.Fatal(err)
} }
args := []string{"install", "mylib"} cmd := exec.Command(testenv.GoToolPath(t), "build", "-buildmode=archive", "-o", "mylib.a", ".")
cmd := exec.Command(testenv.GoToolPath(t), args...)
cmd.Dir = libpath cmd.Dir = libpath
cmd.Env = append(os.Environ(), "GOPATH="+gopath) cmd.Env = append(os.Environ(), "GOPATH="+gopath)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
t.Fatalf("building test lib failed: %s %s", err, out) t.Fatalf("building test lib failed: %s %s", err, out)
} }
pat := filepath.Join(gopath, "pkg", "*", "mylib.a") mylib := filepath.Join(libpath, "mylib.a")
ms, err := filepath.Glob(pat)
if err != nil {
t.Fatal(err)
}
if len(ms) == 0 {
t.Fatalf("cannot found paths for pattern %s", pat)
}
mylib := ms[0]
out, err = exec.Command(testnmpath, mylib).CombinedOutput() out, err = exec.Command(testnmpath, mylib).CombinedOutput()
if err != nil { if err != nil {

View file

@ -247,7 +247,7 @@ p { color: grey85; font-size:85%; }
because it made a system call or tried to acquire a mutex. because it made a system call or tried to acquire a mutex.
Directly underneath each bar, a smaller bar or more commonly a fine Directly underneath each bar, a smaller bar or more commonly a fine
vertical line indicates an event occuring during its execution. vertical line indicates an event occurring during its execution.
Some of these are related to garbage collection; most indicate that Some of these are related to garbage collection; most indicate that
a goroutine yielded its logical processor but then immediately resumed execution a goroutine yielded its logical processor but then immediately resumed execution
on the same logical processor. Clicking on the event displays the stack trace on the same logical processor. Clicking on the event displays the stack trace
@ -274,7 +274,7 @@ p { color: grey85; font-size:85%; }
function written in C. function written in C.
</p> </p>
<p> <p>
Above the event trace for the first logical processor are Above the event trace for the first logical processor are
traces for various runtime-internal events. traces for various runtime-internal events.
The "GC" bar shows when the garbage collector is running, and in which stage. The "GC" bar shows when the garbage collector is running, and in which stage.

View file

@ -571,7 +571,7 @@ func generateTrace(params *traceParams, consumer traceConsumer) error {
fname := stk[0].Fn fname := stk[0].Fn
info.name = fmt.Sprintf("G%v %s", newG, fname) info.name = fmt.Sprintf("G%v %s", newG, fname)
info.isSystemG = isSystemGoroutine(fname) info.isSystemG = trace.IsSystemGoroutine(fname)
ctx.gcount++ ctx.gcount++
setGState(ev, newG, gDead, gRunnable) setGState(ev, newG, gDead, gRunnable)
@ -1129,12 +1129,6 @@ func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int {
return ctx.buildBranch(node, stk) return ctx.buildBranch(node, stk)
} }
func isSystemGoroutine(entryFn string) bool {
// This mimics runtime.isSystemGoroutine as closely as
// possible.
return entryFn != "runtime.main" && strings.HasPrefix(entryFn, "runtime.")
}
// firstTimestamp returns the timestamp of the first event record. // firstTimestamp returns the timestamp of the first event record.
func firstTimestamp() int64 { func firstTimestamp() int64 {
res, _ := parseTrace() res, _ := parseTrace()

View file

@ -248,42 +248,40 @@ func (z *Reader) Read(p []byte) (n int, err error) {
return 0, z.err return 0, z.err
} }
n, z.err = z.decompressor.Read(p) for n == 0 {
z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) n, z.err = z.decompressor.Read(p)
z.size += uint32(n) z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
if z.err != io.EOF { z.size += uint32(n)
// In the normal case we return here. if z.err != io.EOF {
return n, z.err // In the normal case we return here.
return n, z.err
}
// Finished file; check checksum and size.
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
z.err = noEOF(err)
return n, z.err
}
digest := le.Uint32(z.buf[:4])
size := le.Uint32(z.buf[4:8])
if digest != z.digest || size != z.size {
z.err = ErrChecksum
return n, z.err
}
z.digest, z.size = 0, 0
// File is ok; check if there is another.
if !z.multistream {
return n, io.EOF
}
z.err = nil // Remove io.EOF
if _, z.err = z.readHeader(); z.err != nil {
return n, z.err
}
} }
// Finished file; check checksum and size. return n, nil
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
z.err = noEOF(err)
return n, z.err
}
digest := le.Uint32(z.buf[:4])
size := le.Uint32(z.buf[4:8])
if digest != z.digest || size != z.size {
z.err = ErrChecksum
return n, z.err
}
z.digest, z.size = 0, 0
// File is ok; check if there is another.
if !z.multistream {
return n, io.EOF
}
z.err = nil // Remove io.EOF
if _, z.err = z.readHeader(); z.err != nil {
return n, z.err
}
// Read from next file, if necessary.
if n > 0 {
return n, nil
}
return z.Read(p)
} }
// Close closes the Reader. It does not close the underlying io.Reader. // Close closes the Reader. It does not close the underlying io.Reader.

View file

@ -569,3 +569,19 @@ func TestTruncatedStreams(t *testing.T) {
} }
} }
} }
func TestCVE202230631(t *testing.T) {
var empty = []byte{0x1f, 0x8b, 0x08, 0x00, 0xa7, 0x8f, 0x43, 0x62, 0x00,
0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
r := bytes.NewReader(bytes.Repeat(empty, 4e6))
z, err := NewReader(r)
if err != nil {
t.Fatalf("NewReader: got %v, want nil", err)
}
// Prior to CVE-2022-30631 fix, this would cause an unrecoverable panic due
// to stack exhaustion.
_, err = z.Read(make([]byte, 10))
if err != io.EOF {
t.Errorf("Reader.Read: got %v, want %v", err, io.EOF)
}
}

View file

@ -1008,22 +1008,22 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
// we can populate RevocationList.Raw, before unwrapping the // we can populate RevocationList.Raw, before unwrapping the
// SEQUENCE so it can be operated on // SEQUENCE so it can be operated on
if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) { if !input.ReadASN1Element(&input, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed certificate") return nil, errors.New("x509: malformed crl")
} }
rl.Raw = input rl.Raw = input
if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed certificate") return nil, errors.New("x509: malformed crl")
} }
var tbs cryptobyte.String var tbs cryptobyte.String
// do the same trick again as above to extract the raw // do the same trick again as above to extract the raw
// bytes for Certificate.RawTBSCertificate // bytes for Certificate.RawTBSCertificate
if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) { if !input.ReadASN1Element(&tbs, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed tbs certificate") return nil, errors.New("x509: malformed tbs crl")
} }
rl.RawTBSRevocationList = tbs rl.RawTBSRevocationList = tbs
if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { if !tbs.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed tbs certificate") return nil, errors.New("x509: malformed tbs crl")
} }
var version int var version int
@ -1106,13 +1106,10 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
} }
var extensions cryptobyte.String var extensions cryptobyte.String
var present bool var present bool
if !tbs.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) { if !certSeq.ReadOptionalASN1(&extensions, &present, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed extensions") return nil, errors.New("x509: malformed extensions")
} }
if present { if present {
if !extensions.ReadASN1(&extensions, cryptobyte_asn1.SEQUENCE) {
return nil, errors.New("x509: malformed extensions")
}
for !extensions.Empty() { for !extensions.Empty() {
var extension cryptobyte.String var extension cryptobyte.String
if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) { if !extensions.ReadASN1(&extension, cryptobyte_asn1.SEQUENCE) {
@ -1148,6 +1145,15 @@ func ParseRevocationList(der []byte) (*RevocationList, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if ext.Id.Equal(oidExtensionAuthorityKeyId) {
rl.AuthorityKeyId = ext.Value
} else if ext.Id.Equal(oidExtensionCRLNumber) {
value := cryptobyte.String(ext.Value)
rl.Number = new(big.Int)
if !value.ReadASN1Integer(rl.Number) {
return nil, errors.New("x509: malformed crl number")
}
}
rl.Extensions = append(rl.Extensions, ext) rl.Extensions = append(rl.Extensions, ext)
} }
} }

View file

@ -2109,7 +2109,9 @@ type RevocationList struct {
// Issuer contains the DN of the issuing certificate. // Issuer contains the DN of the issuing certificate.
Issuer pkix.Name Issuer pkix.Name
// AuthorityKeyId is used to identify the public key associated with the // AuthorityKeyId is used to identify the public key associated with the
// issuing certificate. // issuing certificate. It is populated from the authorityKeyIdentifier
// extension when parsing a CRL. It is ignored when creating a CRL; the
// extension is populated from the issuing certificate itself.
AuthorityKeyId []byte AuthorityKeyId []byte
Signature []byte Signature []byte
@ -2125,7 +2127,8 @@ type RevocationList struct {
// Number is used to populate the X.509 v2 cRLNumber extension in the CRL, // Number is used to populate the X.509 v2 cRLNumber extension in the CRL,
// which should be a monotonically increasing sequence number for a given // which should be a monotonically increasing sequence number for a given
// CRL scope and CRL issuer. // CRL scope and CRL issuer. It is also populated from the cRLNumber
// extension when parsing a CRL.
Number *big.Int Number *big.Int
// ThisUpdate is used to populate the thisUpdate field in the CRL, which // ThisUpdate is used to populate the thisUpdate field in the CRL, which
@ -2193,6 +2196,10 @@ func CreateRevocationList(rand io.Reader, template *RevocationList, issuer *Cert
if err != nil { if err != nil {
return nil, err return nil, err
} }
if numBytes := template.Number.Bytes(); len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) {
return nil, errors.New("x509: CRL number exceeds 20 octets")
}
crlNum, err := asn1.Marshal(template.Number) crlNum, err := asn1.Marshal(template.Number)
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -2478,6 +2478,40 @@ func TestCreateRevocationList(t *testing.T) {
}, },
expectedError: "x509: template contains nil Number field", expectedError: "x509: template contains nil Number field",
}, },
{
name: "long Number",
key: ec256Priv,
issuer: &Certificate{
KeyUsage: KeyUsageCRLSign,
Subject: pkix.Name{
CommonName: "testing",
},
SubjectKeyId: []byte{1, 2, 3},
},
template: &RevocationList{
ThisUpdate: time.Time{}.Add(time.Hour * 24),
NextUpdate: time.Time{}.Add(time.Hour * 48),
Number: big.NewInt(0).SetBytes(append([]byte{1}, make([]byte, 20)...)),
},
expectedError: "x509: CRL number exceeds 20 octets",
},
{
name: "long Number (20 bytes, MSB set)",
key: ec256Priv,
issuer: &Certificate{
KeyUsage: KeyUsageCRLSign,
Subject: pkix.Name{
CommonName: "testing",
},
SubjectKeyId: []byte{1, 2, 3},
},
template: &RevocationList{
ThisUpdate: time.Time{}.Add(time.Hour * 24),
NextUpdate: time.Time{}.Add(time.Hour * 48),
Number: big.NewInt(0).SetBytes(append([]byte{255}, make([]byte, 19)...)),
},
expectedError: "x509: CRL number exceeds 20 octets",
},
{ {
name: "invalid signature algorithm", name: "invalid signature algorithm",
key: ec256Priv, key: ec256Priv,
@ -2524,6 +2558,34 @@ func TestCreateRevocationList(t *testing.T) {
NextUpdate: time.Time{}.Add(time.Hour * 48), NextUpdate: time.Time{}.Add(time.Hour * 48),
}, },
}, },
{
name: "valid, extra entry extension",
key: ec256Priv,
issuer: &Certificate{
KeyUsage: KeyUsageCRLSign,
Subject: pkix.Name{
CommonName: "testing",
},
SubjectKeyId: []byte{1, 2, 3},
},
template: &RevocationList{
RevokedCertificates: []pkix.RevokedCertificate{
{
SerialNumber: big.NewInt(2),
RevocationTime: time.Time{}.Add(time.Hour),
Extensions: []pkix.Extension{
{
Id: []int{2, 5, 29, 99},
Value: []byte{5, 0},
},
},
},
},
Number: big.NewInt(5),
ThisUpdate: time.Time{}.Add(time.Hour * 24),
NextUpdate: time.Time{}.Add(time.Hour * 48),
},
},
{ {
name: "valid, Ed25519 key", name: "valid, Ed25519 key",
key: ed25519Priv, key: ed25519Priv,
@ -2681,6 +2743,19 @@ func TestCreateRevocationList(t *testing.T) {
t.Fatalf("Extensions mismatch: got %v; want %v.", t.Fatalf("Extensions mismatch: got %v; want %v.",
parsedCRL.Extensions[2:], tc.template.ExtraExtensions) parsedCRL.Extensions[2:], tc.template.ExtraExtensions)
} }
if tc.template.Number != nil && parsedCRL.Number == nil {
t.Fatalf("Generated CRL missing Number: got nil, want %s",
tc.template.Number.String())
}
if tc.template.Number != nil && tc.template.Number.Cmp(parsedCRL.Number) != 0 {
t.Fatalf("Generated CRL has wrong Number: got %s, want %s",
parsedCRL.Number.String(), tc.template.Number.String())
}
if !bytes.Equal(parsedCRL.AuthorityKeyId, expectedAKI) {
t.Fatalf("Generated CRL has wrong Number: got %x, want %x",
parsedCRL.AuthorityKeyId, expectedAKI)
}
}) })
} }
} }

View file

@ -449,6 +449,16 @@ func TestQueryContextWait(t *testing.T) {
// TestTxContextWait tests the transaction behavior when the tx context is canceled // TestTxContextWait tests the transaction behavior when the tx context is canceled
// during execution of the query. // during execution of the query.
func TestTxContextWait(t *testing.T) { func TestTxContextWait(t *testing.T) {
testContextWait(t, false)
}
// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard
// the final connection.
func TestTxContextWaitNoDiscard(t *testing.T) {
testContextWait(t, true)
}
func testContextWait(t *testing.T, keepConnOnRollback bool) {
db := newTestDB(t, "people") db := newTestDB(t, "people")
defer closeDB(t, db) defer closeDB(t, db)
@ -458,7 +468,7 @@ func TestTxContextWait(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
tx.keepConnOnRollback = false tx.keepConnOnRollback = keepConnOnRollback
tx.dc.ci.(*fakeConn).waiter = func(c context.Context) { tx.dc.ci.(*fakeConn).waiter = func(c context.Context) {
cancel() cancel()
@ -472,36 +482,11 @@ func TestTxContextWait(t *testing.T) {
t.Fatalf("expected QueryContext to error with context canceled but returned %v", err) t.Fatalf("expected QueryContext to error with context canceled but returned %v", err)
} }
waitForFree(t, db, 0) if keepConnOnRollback {
} waitForFree(t, db, 1)
} else {
// TestTxContextWaitNoDiscard is the same as TestTxContextWait, but should not discard waitForFree(t, db, 0)
// the final connection.
func TestTxContextWaitNoDiscard(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Millisecond)
defer cancel()
tx, err := db.BeginTx(ctx, nil)
if err != nil {
// Guard against the context being canceled before BeginTx completes.
if err == context.DeadlineExceeded {
t.Skip("tx context canceled prior to first use")
}
t.Fatal(err)
} }
// This will trigger the *fakeConn.Prepare method which will take time
// performing the query. The ctxDriverPrepare func will check the context
// after this and close the rows and return an error.
_, err = tx.QueryContext(ctx, "WAIT|1s|SELECT|people|age,name|")
if err != context.DeadlineExceeded {
t.Fatalf("expected QueryContext to error with context deadline exceeded but returned %v", err)
}
waitForFree(t, db, 1)
} }
// TestUnsupportedOptions checks that the database fails when a driver that // TestUnsupportedOptions checks that the database fails when a driver that

View file

@ -871,8 +871,13 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
return &op return &op
} }
var maxIgnoreNestingDepth = 10000
// decIgnoreOpFor returns the decoding op for a field that has no destination. // decIgnoreOpFor returns the decoding op for a field that has no destination.
func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp) *decOp { func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp, depth int) *decOp {
if depth > maxIgnoreNestingDepth {
error_(errors.New("invalid nesting depth"))
}
// If this type is already in progress, it's a recursive type (e.g. map[string]*T). // If this type is already in progress, it's a recursive type (e.g. map[string]*T).
// Return the pointer to the op we're already building. // Return the pointer to the op we're already building.
if opPtr := inProgress[wireId]; opPtr != nil { if opPtr := inProgress[wireId]; opPtr != nil {
@ -896,7 +901,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
errorf("bad data: undefined type %s", wireId.string()) errorf("bad data: undefined type %s", wireId.string())
case wire.ArrayT != nil: case wire.ArrayT != nil:
elemId := wire.ArrayT.Elem elemId := wire.ArrayT.Elem
elemOp := dec.decIgnoreOpFor(elemId, inProgress) elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
op = func(i *decInstr, state *decoderState, value reflect.Value) { op = func(i *decInstr, state *decoderState, value reflect.Value) {
state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len) state.dec.ignoreArray(state, *elemOp, wire.ArrayT.Len)
} }
@ -904,15 +909,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId, inProgress map[typeId]*decOp)
case wire.MapT != nil: case wire.MapT != nil:
keyId := dec.wireType[wireId].MapT.Key keyId := dec.wireType[wireId].MapT.Key
elemId := dec.wireType[wireId].MapT.Elem elemId := dec.wireType[wireId].MapT.Elem
keyOp := dec.decIgnoreOpFor(keyId, inProgress) keyOp := dec.decIgnoreOpFor(keyId, inProgress, depth+1)
elemOp := dec.decIgnoreOpFor(elemId, inProgress) elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
op = func(i *decInstr, state *decoderState, value reflect.Value) { op = func(i *decInstr, state *decoderState, value reflect.Value) {
state.dec.ignoreMap(state, *keyOp, *elemOp) state.dec.ignoreMap(state, *keyOp, *elemOp)
} }
case wire.SliceT != nil: case wire.SliceT != nil:
elemId := wire.SliceT.Elem elemId := wire.SliceT.Elem
elemOp := dec.decIgnoreOpFor(elemId, inProgress) elemOp := dec.decIgnoreOpFor(elemId, inProgress, depth+1)
op = func(i *decInstr, state *decoderState, value reflect.Value) { op = func(i *decInstr, state *decoderState, value reflect.Value) {
state.dec.ignoreSlice(state, *elemOp) state.dec.ignoreSlice(state, *elemOp)
} }
@ -1073,7 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *de
func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine { func (dec *Decoder) compileIgnoreSingle(remoteId typeId) *decEngine {
engine := new(decEngine) engine := new(decEngine)
engine.instr = make([]decInstr, 1) // one item engine.instr = make([]decInstr, 1) // one item
op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp)) op := dec.decIgnoreOpFor(remoteId, make(map[typeId]*decOp), 0)
ovfl := overflow(dec.typeString(remoteId)) ovfl := overflow(dec.typeString(remoteId))
engine.instr[0] = decInstr{*op, 0, nil, ovfl} engine.instr[0] = decInstr{*op, 0, nil, ovfl}
engine.numInstr = 1 engine.numInstr = 1
@ -1118,7 +1123,7 @@ func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEn
localField, present := srt.FieldByName(wireField.Name) localField, present := srt.FieldByName(wireField.Name)
// TODO(r): anonymous names // TODO(r): anonymous names
if !present || !isExported(wireField.Name) { if !present || !isExported(wireField.Name) {
op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp)) op := dec.decIgnoreOpFor(wireField.Id, make(map[typeId]*decOp), 0)
engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl} engine.instr[fieldnum] = decInstr{*op, fieldnum, nil, ovfl}
continue continue
} }

View file

@ -12,6 +12,7 @@ import (
"fmt" "fmt"
"io" "io"
"net" "net"
"reflect"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -796,3 +797,26 @@ func TestNetIP(t *testing.T) {
t.Errorf("decoded to %v, want 1.2.3.4", ip.String()) t.Errorf("decoded to %v, want 1.2.3.4", ip.String())
} }
} }
func TestIgnoreDepthLimit(t *testing.T) {
// We don't test the actual depth limit because it requires building an
// extremely large message, which takes quite a while.
oldNestingDepth := maxIgnoreNestingDepth
maxIgnoreNestingDepth = 100
defer func() { maxIgnoreNestingDepth = oldNestingDepth }()
b := new(bytes.Buffer)
enc := NewEncoder(b)
typ := reflect.TypeOf(int(0))
nested := reflect.ArrayOf(1, typ)
for i := 0; i < 100; i++ {
nested = reflect.ArrayOf(1, nested)
}
badStruct := reflect.New(reflect.StructOf([]reflect.StructField{{Name: "F", Type: nested}}))
enc.Encode(badStruct.Interface())
dec := NewDecoder(b)
var output struct{ Hello int }
expectedErr := "invalid nesting depth"
if err := dec.Decode(&output); err == nil || err.Error() != expectedErr {
t.Errorf("Decode didn't fail with depth limit of 100: want %q, got %q", expectedErr, err)
}
}

View file

@ -152,7 +152,7 @@ func (d *Decoder) DecodeElement(v any, start *StartElement) error {
if val.IsNil() { if val.IsNil() {
return errors.New("nil pointer passed to Unmarshal") return errors.New("nil pointer passed to Unmarshal")
} }
return d.unmarshal(val.Elem(), start) return d.unmarshal(val.Elem(), start, 0)
} }
// An UnmarshalError represents an error in the unmarshaling process. // An UnmarshalError represents an error in the unmarshaling process.
@ -308,8 +308,15 @@ var (
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
) )
const maxUnmarshalDepth = 10000
var errExeceededMaxUnmarshalDepth = errors.New("exceeded max depth")
// Unmarshal a single XML element into val. // Unmarshal a single XML element into val.
func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error { func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) error {
if depth >= maxUnmarshalDepth {
return errExeceededMaxUnmarshalDepth
}
// Find start element if we need it. // Find start element if we need it.
if start == nil { if start == nil {
for { for {
@ -402,7 +409,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem()))) v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem())))
// Recur to read element into slice. // Recur to read element into slice.
if err := d.unmarshal(v.Index(n), start); err != nil { if err := d.unmarshal(v.Index(n), start, depth+1); err != nil {
v.SetLen(n) v.SetLen(n)
return err return err
} }
@ -525,13 +532,15 @@ Loop:
case StartElement: case StartElement:
consumed := false consumed := false
if sv.IsValid() { if sv.IsValid() {
consumed, err = d.unmarshalPath(tinfo, sv, nil, &t) // unmarshalPath can call unmarshal, so we need to pass the depth through so that
// we can continue to enforce the maximum recusion limit.
consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
if err != nil { if err != nil {
return err return err
} }
if !consumed && saveAny.IsValid() { if !consumed && saveAny.IsValid() {
consumed = true consumed = true
if err := d.unmarshal(saveAny, &t); err != nil { if err := d.unmarshal(saveAny, &t, depth+1); err != nil {
return err return err
} }
} }
@ -676,7 +685,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) {
// The consumed result tells whether XML elements have been consumed // The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's // from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields. // still untouched because start is uninteresting for sv's fields.
func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement, depth int) (consumed bool, err error) {
recurse := false recurse := false
Loop: Loop:
for i := range tinfo.fields { for i := range tinfo.fields {
@ -691,7 +700,7 @@ Loop:
} }
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field. // It's a perfect match, unmarshal the field.
return true, d.unmarshal(finfo.value(sv, initNilPointers), start) return true, d.unmarshal(finfo.value(sv, initNilPointers), start, depth+1)
} }
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse // It's a prefix for the field. Break and recurse
@ -720,7 +729,9 @@ Loop:
} }
switch t := tok.(type) { switch t := tok.(type) {
case StartElement: case StartElement:
consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t) // the recursion depth of unmarshalPath is limited to the path length specified
// by the struct field tag, so we don't increment the depth here.
consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t, depth)
if err != nil { if err != nil {
return true, err return true, err
} }
@ -736,12 +747,12 @@ Loop:
} }
// Skip reads tokens until it has consumed the end element // Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed. // matching the most recent start element already consumed,
// It recurs if it encounters a start element, so it can be used to // skipping nested structures.
// skip nested structures.
// It returns nil if it finds an end element matching the start // It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem. // element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error { func (d *Decoder) Skip() error {
var depth int64
for { for {
tok, err := d.Token() tok, err := d.Token()
if err != nil { if err != nil {
@ -749,11 +760,12 @@ func (d *Decoder) Skip() error {
} }
switch tok.(type) { switch tok.(type) {
case StartElement: case StartElement:
if err := d.Skip(); err != nil { depth++
return err
}
case EndElement: case EndElement:
return nil if depth == 0 {
return nil
}
depth--
} }
} }
} }

View file

@ -5,6 +5,8 @@
package xml package xml
import ( import (
"bytes"
"errors"
"io" "io"
"reflect" "reflect"
"strings" "strings"
@ -1094,3 +1096,32 @@ func TestUnmarshalIntoNil(t *testing.T) {
} }
} }
func TestCVE202228131(t *testing.T) {
type nested struct {
Parent *nested `xml:",any"`
}
var n nested
err := Unmarshal(bytes.Repeat([]byte("<a>"), maxUnmarshalDepth+1), &n)
if err == nil {
t.Fatal("Unmarshal did not fail")
} else if !errors.Is(err, errExeceededMaxUnmarshalDepth) {
t.Fatalf("Unmarshal unexpected error: got %q, want %q", err, errExeceededMaxUnmarshalDepth)
}
}
func TestCVE202230633(t *testing.T) {
if testing.Short() {
t.Skip("test requires significant memory")
}
defer func() {
p := recover()
if p != nil {
t.Fatal("Unmarshal panicked")
}
}()
var example struct {
Things []string
}
Unmarshal(bytes.Repeat([]byte("<a>"), 17_000_000), &example)
}

View file

@ -49,10 +49,11 @@ The arguments are indexed from 0 through flag.NArg()-1.
The following forms are permitted: The following forms are permitted:
-flag -flag
--flag // double dashes are also permitted
-flag=x -flag=x
-flag x // non-boolean flags only -flag x // non-boolean flags only
One or two minus signs may be used; they are equivalent. One or two dashes may be used; they are equivalent.
The last form is not permitted for boolean flags because the The last form is not permitted for boolean flags because the
meaning of the command meaning of the command

View file

@ -715,6 +715,9 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
tried.goroot = dir tried.goroot = dir
} }
if ctxt.Compiler == "gccgo" && goroot.IsStandardPackage(ctxt.GOROOT, ctxt.Compiler, path) { if ctxt.Compiler == "gccgo" && goroot.IsStandardPackage(ctxt.GOROOT, ctxt.Compiler, path) {
// TODO(bcmills): Setting p.Dir here is misleading, because gccgo
// doesn't actually load its standard-library packages from this
// directory. See if we can leave it unset.
p.Dir = ctxt.joinPath(ctxt.GOROOT, "src", path) p.Dir = ctxt.joinPath(ctxt.GOROOT, "src", path)
p.Goroot = true p.Goroot = true
p.Root = ctxt.GOROOT p.Root = ctxt.GOROOT

View file

@ -5,9 +5,7 @@
// Package constraint implements parsing and evaluation of build constraint lines. // Package constraint implements parsing and evaluation of build constraint lines.
// See https://golang.org/cmd/go/#hdr-Build_constraints for documentation about build constraints themselves. // See https://golang.org/cmd/go/#hdr-Build_constraints for documentation about build constraints themselves.
// //
// This package parses both the original “// +build” syntax and the “//go:build” syntax that will be added in Go 1.17. // This package parses both the original “// +build” syntax and the “//go:build” syntax that was added in Go 1.17.
// The parser is being included in Go 1.16 to allow tools that need to process Go 1.17 source code
// to still be built against the Go 1.16 release.
// See https://golang.org/design/draft-gobuild for details about the “//go:build” syntax. // See https://golang.org/design/draft-gobuild for details about the “//go:build” syntax.
package constraint package constraint

View file

@ -57,12 +57,13 @@
// //
// # Build Constraints // # Build Constraints
// //
// A build constraint, also known as a build tag, is a line comment that begins // A build constraint, also known as a build tag, is a condition under which a
// file should be included in the package. Build constraints are given by a
// line comment that begins
// //
// //go:build // //go:build
// //
// that lists the conditions under which a file should be included in the // Build constraints may also be part of a file's name
// package. Build constraints may also be part of a file's name
// (for example, source_windows.go will only be included if the target // (for example, source_windows.go will only be included if the target
// operating system is windows). // operating system is windows).
// //

View file

@ -326,6 +326,13 @@ func (p *Parser) Parse(text string) *Doc {
switch b := b.(type) { switch b := b.(type) {
case *Paragraph: case *Paragraph:
b.Text = d.parseLinkedText(string(b.Text[0].(Plain))) b.Text = d.parseLinkedText(string(b.Text[0].(Plain)))
case *List:
for _, i := range b.Items {
for _, c := range i.Content {
p := c.(*Paragraph)
p.Text = d.parseLinkedText(string(p.Text[0].(Plain)))
}
}
} }
} }

View file

@ -0,0 +1,18 @@
{"DocLinkBaseURL": "https://pkg.go.dev"}
-- input --
Did you know?
- [encoding/json.Marshal] is a doc link. So is [encoding/json.Unmarshal].
-- text --
Did you know?
- encoding/json.Marshal is a doc link. So is encoding/json.Unmarshal.
-- markdown --
Did you know?
- [encoding/json.Marshal](https://pkg.go.dev/encoding/json#Marshal) is a doc link. So is [encoding/json.Unmarshal](https://pkg.go.dev/encoding/json#Unmarshal).
-- html --
<p>Did you know?
<ul>
<li><a href="https://pkg.go.dev/encoding/json#Marshal">encoding/json.Marshal</a> is a doc link. So is <a href="https://pkg.go.dev/encoding/json#Unmarshal">encoding/json.Unmarshal</a>.
</ul>

View file

@ -0,0 +1,39 @@
{"DocLinkBaseURL": "https://pkg.go.dev"}
-- input --
Did you know?
- [testing.T] is one doc link.
- So is [testing.M].
- So is [testing.B].
This is the same list paragraph.
There is [testing.PB] in this list item, too!
-- text --
Did you know?
- testing.T is one doc link.
- So is testing.M.
- So is testing.B. This is the same list paragraph.
There is testing.PB in this list item, too!
-- markdown --
Did you know?
- [testing.T](https://pkg.go.dev/testing#T) is one doc link.
- So is [testing.M](https://pkg.go.dev/testing#M).
- So is [testing.B](https://pkg.go.dev/testing#B). This is the same list paragraph.
There is [testing.PB](https://pkg.go.dev/testing#PB) in this list item, too!
-- html --
<p>Did you know?
<ul>
<li><p><a href="https://pkg.go.dev/testing#T">testing.T</a> is one doc link.
<li><p>So is <a href="https://pkg.go.dev/testing#M">testing.M</a>.
<li><p>So is <a href="https://pkg.go.dev/testing#B">testing.B</a>.
This is the same list paragraph.
<p>There is <a href="https://pkg.go.dev/testing#PB">testing.PB</a> in this list item, too!
</ul>

View file

@ -0,0 +1,31 @@
{"DocLinkBaseURL": "https://pkg.go.dev"}
-- input --
Cool things:
- Foo
- [Go]
- Bar
[Go]: https://go.dev/
-- text --
Cool things:
- Foo
- Go
- Bar
[Go]: https://go.dev/
-- markdown --
Cool things:
- Foo
- [Go](https://go.dev/)
- Bar
-- html --
<p>Cool things:
<ul>
<li>Foo
<li><a href="https://go.dev/">Go</a>
<li>Bar
</ul>

View file

@ -0,0 +1,36 @@
{"DocLinkBaseURL": "https://pkg.go.dev"}
-- input --
Cool things:
- Foo
- [Go] is great
[Go]: https://go.dev/
- Bar
-- text --
Cool things:
- Foo
- Go is great
- Bar
[Go]: https://go.dev/
-- markdown --
Cool things:
- Foo
- [Go](https://go.dev/) is great
- Bar
-- html --
<p>Cool things:
<ul>
<li><p>Foo
<li><p><a href="https://go.dev/">Go</a> is great
<li><p>Bar
</ul>

View file

@ -94,8 +94,11 @@ func ParseFile(fset *token.FileSet, filename string, src any, mode Mode) (f *ast
defer func() { defer func() {
if e := recover(); e != nil { if e := recover(); e != nil {
// resume same panic if it's not a bailout // resume same panic if it's not a bailout
if _, ok := e.(bailout); !ok { bail, ok := e.(bailout)
if !ok {
panic(e) panic(e)
} else if bail.msg != "" {
p.errors.Add(p.file.Position(bail.pos), bail.msg)
} }
} }
@ -198,8 +201,11 @@ func ParseExprFrom(fset *token.FileSet, filename string, src any, mode Mode) (ex
defer func() { defer func() {
if e := recover(); e != nil { if e := recover(); e != nil {
// resume same panic if it's not a bailout // resume same panic if it's not a bailout
if _, ok := e.(bailout); !ok { bail, ok := e.(bailout)
if !ok {
panic(e) panic(e)
} else if bail.msg != "" {
p.errors.Add(p.file.Position(bail.pos), bail.msg)
} }
} }
p.errors.Sort() p.errors.Sort()

View file

@ -59,6 +59,10 @@ type parser struct {
inRhs bool // if set, the parser is parsing a rhs expression inRhs bool // if set, the parser is parsing a rhs expression
imports []*ast.ImportSpec // list of imports imports []*ast.ImportSpec // list of imports
// nestLev is used to track and limit the recursion depth
// during parsing.
nestLev int
} }
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) { func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
@ -108,6 +112,24 @@ func un(p *parser) {
p.printTrace(")") p.printTrace(")")
} }
// maxNestLev is the deepest we're willing to recurse during parsing
const maxNestLev int = 1e5
func incNestLev(p *parser) *parser {
p.nestLev++
if p.nestLev > maxNestLev {
p.error(p.pos, "exceeded max nesting depth")
panic(bailout{})
}
return p
}
// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
// It is used along with incNestLev in a similar fashion to how un and trace are used.
func decNestLev(p *parser) {
p.nestLev--
}
// Advance to the next token. // Advance to the next token.
func (p *parser) next0() { func (p *parser) next0() {
// Because of one-token look-ahead, print the previous token // Because of one-token look-ahead, print the previous token
@ -218,8 +240,12 @@ func (p *parser) next() {
} }
} }
// A bailout panic is raised to indicate early termination. // A bailout panic is raised to indicate early termination. pos and msg are
type bailout struct{} // only populated when bailing out of object resolution.
type bailout struct {
pos token.Pos
msg string
}
func (p *parser) error(pos token.Pos, msg string) { func (p *parser) error(pos token.Pos, msg string) {
if p.trace { if p.trace {
@ -1247,6 +1273,8 @@ func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
} }
func (p *parser) tryIdentOrType() ast.Expr { func (p *parser) tryIdentOrType() ast.Expr {
defer decNestLev(incNestLev(p))
switch p.tok { switch p.tok {
case token.IDENT: case token.IDENT:
typ := p.parseTypeName(nil) typ := p.parseTypeName(nil)
@ -1657,7 +1685,13 @@ func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
if x == nil { if x == nil {
x = p.parseOperand() x = p.parseOperand()
} }
for { // We track the nesting here rather than at the entry for the function,
// since it can iteratively produce a nested output, and we want to
// limit how deep a structure we generate.
var n int
defer func() { p.nestLev -= n }()
for n = 1; ; n++ {
incNestLev(p)
switch p.tok { switch p.tok {
case token.PERIOD: case token.PERIOD:
p.next() p.next()
@ -1717,6 +1751,8 @@ func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
} }
func (p *parser) parseUnaryExpr() ast.Expr { func (p *parser) parseUnaryExpr() ast.Expr {
defer decNestLev(incNestLev(p))
if p.trace { if p.trace {
defer un(trace(p, "UnaryExpr")) defer un(trace(p, "UnaryExpr"))
} }
@ -1806,7 +1842,13 @@ func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int, check bool) ast.Expr {
if x == nil { if x == nil {
x = p.parseUnaryExpr() x = p.parseUnaryExpr()
} }
for { // We track the nesting here rather than at the entry for the function,
// since it can iteratively produce a nested output, and we want to
// limit how deep a structure we generate.
var n int
defer func() { p.nestLev -= n }()
for n = 1; ; n++ {
incNestLev(p)
op, oprec := p.tokPrec() op, oprec := p.tokPrec()
if oprec < prec1 { if oprec < prec1 {
return x return x
@ -2099,6 +2141,8 @@ func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
} }
func (p *parser) parseIfStmt() *ast.IfStmt { func (p *parser) parseIfStmt() *ast.IfStmt {
defer decNestLev(incNestLev(p))
if p.trace { if p.trace {
defer un(trace(p, "IfStmt")) defer un(trace(p, "IfStmt"))
} }
@ -2402,6 +2446,8 @@ func (p *parser) parseForStmt() ast.Stmt {
} }
func (p *parser) parseStmt() (s ast.Stmt) { func (p *parser) parseStmt() (s ast.Stmt) {
defer decNestLev(incNestLev(p))
if p.trace { if p.trace {
defer un(trace(p, "Statement")) defer un(trace(p, "Statement"))
} }

Some files were not shown because too many files have changed in this diff Show more