Merge branch 'master' into travis-exp

Jguer 2018-09-15 19:49:34 +01:00
commit 586dfc8ce7
No known key found for this signature in database
GPG key ID: 09754DBECF21746F
59 changed files with 5147 additions and 4312 deletions

Gopkg.lock generated

@ -3,25 +3,47 @@
[[projects]]
branch = "master"
digest = "1:4e8f84eb856dd3ed05982f065caf7866a4995be9cc3718614dcecbd57e26c260"
name = "github.com/Morganamilo/go-pacmanconf"
packages = [
".",
"ini",
]
pruneopts = "NUT"
revision = "9c5265e1b14f7e21dafabbad9ccf801d3815d707"
[[projects]]
branch = "master"
digest = "1:5f709618dc8a0ff9221d3685c95d69ed7d80ca94e58f3483f5d9bdefb4e6bb25"
name = "github.com/Morganamilo/go-srcinfo"
packages = ["."]
pruneopts = "NUT"
revision = "368edc79b2c53cd9c065818fd4e65843ef3e9e11"
[[projects]]
branch = "master"
digest = "1:6eef31522b3a2349e9399c903ffa76c084c7fb9ef9d17a4e3da420649b0f5d17"
name = "github.com/jguer/go-alpm"
packages = ["."]
revision = "1114f773cdfb05f577438f7a0538eccabc9cf012"
pruneopts = "NUT"
revision = "643c287316a5456348cc689e1f2c980410e17d47"
[[projects]]
branch = "master"
digest = "1:07c508c49b9c13cf582c2b986635d0acd11e113e0535fa4b147026e15bc64185"
name = "github.com/mikkeloscar/aur"
packages = ["."]
revision = "837b260b8e90895c45737e2e72313fe5bce6f2c4"
[[projects]]
branch = "master"
name = "github.com/mikkeloscar/gopkgbuild"
packages = ["."]
revision = "2bb4f1f1db67f81fe50f9c1c4ad9db4f20fd6b22"
pruneopts = "NUT"
revision = "f998dbf94dc47ef839c76740efeb673d3459be1f"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "456465ee334310996a51a2282bf4cfe9f6269db508479c962474d61a4ce0a08c"
input-imports = [
"github.com/Morganamilo/go-pacmanconf",
"github.com/Morganamilo/go-srcinfo",
"github.com/jguer/go-alpm",
"github.com/mikkeloscar/aur",
]
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@ -13,4 +13,8 @@
[[constraint]]
branch = "master"
name = "github.com/mikkeloscar/gopkgbuild"
name = "github.com/Morganamilo/go-srcinfo"
[[constraint]]
branch = "master"
name = "github.com/Morganamilo/go-pacmanconf"

Makefile

@ -4,20 +4,18 @@ PREFIX := /usr
DESTDIR :=
ifndef VERSION
MAJORVERSION := 6
MAJORVERSION := 8
MINORVERSION ?= $(shell git rev-list --count master)
endif
VERSION := ${MAJORVERSION}.${MINORVERSION}
LDFLAGS := -ldflags '-s -w -X main.version=${VERSION}'
GOFILES := $(shell ls *.go | grep -v /vendor/)
ARCH := $(shell uname -m)
PKGNAME := yay
BINNAME := yay
PACKAGE := ${PKGNAME}_${VERSION}_${ARCH}
export GOPATH=$(shell pwd)/.go
export GOROOT=/usr/lib/go
default: build

README.md

@ -1,45 +1,59 @@
# yay
# Yay
Yet another Yogurt - An AUR Helper written in Go
Yet Another Yogurt - An AUR Helper Written in Go
#### Packages
[![yay](https://img.shields.io/aur/version/yay.svg?label=yay)](https://aur.archlinux.org/packages/yay/) [![yay-bin](https://img.shields.io/aur/version/yay-bin.svg?label=yay-bin)](https://aur.archlinux.org/packages/yay-bin/) [![yay-git](https://img.shields.io/aur/version/yay-git.svg?label=yay-git)](https://aur.archlinux.org/packages/yay-git/) [![GitHub license](https://img.shields.io/github/license/jguer/yay.svg)](https://github.com/Jguer/yay/blob/master/LICENSE)
## Objectives
There's a point in everyone's life when you feel the need to write an AUR helper because there are only about 20 of them.
So say hi to 20+1.
Yay was created with a few objectives in mind and based on the design of [yaourt](https://github.com/archlinuxfr/yaourt), [apacman](https://github.com/oshazard/apacman) and [pacaur](https://github.com/rmarquis/pacaur):
Yay is based on the design of [yaourt](https://github.com/archlinuxfr/yaourt), [apacman](https://github.com/oshazard/apacman) and [pacaur](https://github.com/rmarquis/pacaur). It is developed with these objectives in mind:
* Have almost no dependencies.
* Provide an interface for pacman.
* Have yaourt like search.
* Provide an interface for pacman
* Yaourt-style interactive search/install
* Minimal dependencies
* Minimize user input
* Know when git packages are due for an upgrade.
* Know when git packages are due for upgrades
## Features
* AUR Tab completion
* Download PKGBUILD from ABS or AUR
* Ask all questions first and then start building
* Search narrowing (`yay linux header` will first search linux and then narrow on header)
* No sourcing of PKGBUILD is done
* The binary has no dependencies that pacman doesn't already have.
* Advanced dependency solving
* Perform advanced dependency solving
* Download PKGBUILDs from ABS or AUR
* Tab-complete the AUR
* Query user up-front for all input (prior to starting builds)
* Narrow search terms (`yay linux header` will first search `linux` and then narrow on `header`)
* Find matching package providers during search and allow selection
* Remove make dependencies at the end of the build process
* Run without sourcing PKGBUILD
## Installation
If you are migrating from another AUR helper you can simply install Yay from
the AUR like any other package.
If you are migrating from another AUR helper, you can simply install Yay with that helper.
The initial installation of Yay can be done by cloning the PKGBUILD and
building with makepkg.
Alternatively, the initial installation of Yay can be done by cloning the PKGBUILD and
building with makepkg:
```sh
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si
```
## Support
All support related to Yay should be requested via GitHub issues. Since Yay is not
officially supported by Arch Linux, support should not be sought out on the
forums, AUR comments or other official channels.
A broken AUR package should be reported as a comment on the package's AUR page.
A package may only be considered broken if it fails to build with makepkg.
Reports should be made using makepkg and include the full output as well as any
other relevant information. Never make reports using Yay or any other external
tools.
## Contributing
Contributors are always welcome!
@ -49,68 +63,105 @@ on, we suggest opening an issue detailing your ideas first.
Otherwise send us a pull request and we will be happy to review it.
### Code Style
### Dependencies
All code should be formatted through `go fmt`. This tool will automatically
format code for you. Although it is recommended you write code in this style
and just use this tool to catch mistakes.
Yay depends on:
* go (make only)
* git
* base-devel
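A minimal sketch of installing these with pacman, assuming the standard Arch Linux package names:

```sh
# go is only needed at build time; git and base-devel are needed to fetch
# sources and run makepkg.
sudo pacman -S --needed go git base-devel
```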
Note: Yay also depends on a few other projects (as vendored dependencies). These
projects are stored in `vendor/`, are built into yay at build time, and do not
need to be installed separately. These files are managed by
[dep](https://github.com/golang/dep) and should not be modified manually.
Following are the dependencies managed under dep:
* https://github.com/Jguer/go-alpm
* https://github.com/Morganamilo/go-srcinfo
* https://github.com/mikkeloscar/aur
### Building
Yay is easy to build with its only build dependency being `go` and the
assumption of `base-devel` being installed.
Run `make` to build Yay. This command will generate a binary called `yay` in
the same directory as the Makefile.
Run `make` to build Yay. This will generate a binary called `yay` in the same
directory as the Makefile.
Note: Yay's Makefile automatically sets the `GOPATH` to `$PWD/.go`. This path will
ensure dependencies in `vendor/` are built. Running manual go commands such as
`go build` will require that you either set the `GOPATH` manually or `go get`
the vendored dependencies into your own `GOPATH`.
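As an illustration of the second option, a hedged sketch that fetches the dependencies into your own `GOPATH` (import paths as used in the source; `go-pacmanconf` is tracked in `Gopkg.lock` as well):

```sh
# Fetch the vendored projects into your own GOPATH, then build manually
# without the Makefile.
go get github.com/jguer/go-alpm
go get github.com/Morganamilo/go-srcinfo
go get github.com/Morganamilo/go-pacmanconf
go get github.com/mikkeloscar/aur
go build -v
```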
Run `make test` to test Yay. This will check the code is formatted correctly,
run the code through `go vet` and run unit tests.
### Code Style
Yay's Makefile automatically sets the `GOPATH` to `$PWD/.go`. This makes it easy to
build using the dependencies in `vendor/`. Running manual go commands such as
`go build` will require that you either set the `GOPATH` manually or `go get`
the dependencies into your own `GOPATH`.
All code should be formatted through `go fmt`. This tool will automatically
format code for you. We recommend, however, that you write code in the proper
style and use `go fmt` only to catch mistakes.
### Vendored Dependencies
### Testing
Yay depends on a couple of other projects. These are stored in `vendor/` and
are built into Yay at build time. They do not need to be installed separately.
Currently Yay depends on:
* https://github.com/Jguer/go-alpm
* https://github.com/mikkeloscar/gopkgbuild
* https://github.com/mikkeloscar/aur
Run `make test` to test Yay. This command will verify that the code is
formatted correctly, run the code through `go vet`, and run unit tests.
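A rough sketch of what that amounts to when run by hand (the exact Makefile invocations may differ):

```sh
gofmt -l *.go   # list any files that are not gofmt-clean
go vet          # static analysis
go test -v      # unit tests
```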
## Frequently Asked Questions
* Yay does not display colored output. How do I fix it?
Make sure you have the `Color` option in your `/etc/pacman.conf` [#123](https://github.com/Jguer/yay/issues/123)
#### Yay does not display colored output. How do I fix it?
Make sure you have the `Color` option in your `/etc/pacman.conf`
(see issue [#123](https://github.com/Jguer/yay/issues/123)).
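A quick way to check, as a sketch:

```sh
# Print the Color line from pacman.conf; if it is commented out, remove the
# leading '#' in the [options] section.
grep -nE '^#?Color' /etc/pacman.conf
```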
* Sometimes diffs are printed to the terminal and other times they are paged
via less. How do I fix this?
Yay uses `git diff` to display diffs; by default git tells less not to page
if the output fits in one terminal length. This can be overridden by
exporting your own flags (`export LESS=SRX`).
#### Yay is not prompting to skip packages during system upgrade.
The default behavior was changed after
[v8.918](https://github.com/Jguer/yay/releases/tag/v8.918)
(see [3bdb534](https://github.com/Jguer/yay/commit/3bdb5343218d99d40f8a449b887348611f6bdbfc)
and issue [#554](https://github.com/Jguer/yay/issues/554)).
To restore the package-skip behavior use `--combinedupgrade` (make
it permanent by appending `--save`). Note: skipping packages will leave your
system in a
[partially-upgraded state](https://wiki.archlinux.org/index.php/System_maintenance#Partial_upgrades_are_unsupported).
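For example, using the flags described above:

```sh
# Upgrade with the repo and AUR parts handled together, restoring the skip prompt.
yay -Syu --combinedupgrade
# Append --save to write the setting to yay's config file permanently.
yay -Syu --combinedupgrade --save
```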
* Yay is not asking me to edit PKGBUILDS and I don't like diff menu! What do?
#### Sometimes diffs are printed to the terminal, and other times they are paged via less. How do I fix this?
Yay uses `git diff` to display diffs, which by default tells less not to
page if the output can fit into one terminal length. This behavior can be
overridden by exporting your own flags (`export LESS=SRX`).
#### Yay is not asking me to edit PKGBUILDS, and I don't like the diff menu! What can I do?
`yay --editmenu --nodiffmenu --save`
* Only act on AUR packages or only on repo packages?
`yay -{OPERATION} --aur`
`yay -{OPERATION} --repo`
#### How can I tell Yay to act only on AUR packages, or only on repo packages?
`yay -{OPERATION} --aur`
`yay -{OPERATION} --repo`
#### An `Out Of Date AUR Packages` message is displayed. Why doesn't Yay update them?
This message does not mean that updated AUR packages are available. It means
the packages have been flagged out of date on the AUR, but
their maintainers have not yet updated the `PKGBUILD`s
(see [outdated AUR packages](https://wiki.archlinux.org/index.php/Arch_User_Repository#Foo_in_the_AUR_is_outdated.3B_what_should_I_do.3F)).
#### Yay doesn't install dependencies added to a PKGBUILD during installation.
Yay resolves all dependencies ahead of time. You are free to edit the
PKGBUILD in any way, but any problems you cause are your own and should not be
reported unless they can be reproduced with the original PKGBUILD.
## Examples of Custom Operations
* `yay <Search Term>` presents package selection menu
* `yay -Ps` prints system statistics
* `yay -Pu` prints update list
* `yay -Yc` cleans unneeded dependencies
* `yay -G` downloads PKGBUILD from ABS or AUR
* `yay -Y --gendb` generates development package DB used for devel updates.
* `yay -Syu --devel --timeupdate` Normal update but also check for development
package updates and uses PKGBUILD modification time and not version to
determine update
`yay <Search Term>`
&nbsp; &nbsp; &nbsp; &nbsp; Present package-installation selection menu.
`yay -Ps`
&nbsp; &nbsp; &nbsp; &nbsp; Print system statistics.
`yay -Yc`
&nbsp; &nbsp; &nbsp; &nbsp; Clean unneeded dependencies.
`yay -G <AUR Package>`
&nbsp; &nbsp; &nbsp; &nbsp; Download PKGBUILD from ABS or AUR.
`yay -Y --gendb`
&nbsp; &nbsp; &nbsp; &nbsp; Generate development package database used for devel update.
`yay -Syu --devel --timeupdate`
&nbsp; &nbsp; &nbsp; &nbsp; Perform system upgrade, but also check for development package updates and use
&nbsp; &nbsp; &nbsp; &nbsp; PKGBUILD modification time (not version number) to determine update.
## Images


@ -10,78 +10,88 @@ import (
)
func questionCallback(question alpm.QuestionAny) {
qi, err := question.QuestionInstallIgnorepkg()
if err == nil {
if qi, err := question.QuestionInstallIgnorepkg(); err == nil {
qi.SetInstall(true)
}
qp, err := question.QuestionSelectProvider()
if err == nil {
size := 0
if err != nil {
return
}
qp.Providers(alpmHandle).ForEach(func(pkg alpm.Package) error {
size++
return nil
})
size := 0
fmt.Print(bold(cyan(":: ")))
str := bold(fmt.Sprintf(bold("There are %d providers available for %s:"), size, qp.Dep()))
qp.Providers(alpmHandle).ForEach(func(pkg alpm.Package) error {
size++
return nil
})
size = 1
var db string
fmt.Print(bold(cyan(":: ")))
str := bold(fmt.Sprintf(bold("There are %d providers available for %s:"), size, qp.Dep()))
qp.Providers(alpmHandle).ForEach(func(pkg alpm.Package) error {
thisDb := pkg.DB().Name()
size = 1
var db string
if db != thisDb {
db = thisDb
str += bold(cyan("\n:: ")) + bold("Repository "+db+"\n ")
}
str += fmt.Sprintf("%d) %s ", size, pkg.Name())
size++
return nil
})
qp.Providers(alpmHandle).ForEach(func(pkg alpm.Package) error {
thisDb := pkg.DB().Name()
fmt.Println(str)
if db != thisDb {
db = thisDb
str += bold(cyan("\n:: ")) + bold("Repository "+db+"\n ")
}
str += fmt.Sprintf("%d) %s ", size, pkg.Name())
size++
return nil
})
for {
fmt.Print("\nEnter a number (default=1): ")
fmt.Println(str)
if config.NoConfirm {
fmt.Println()
break
}
for {
fmt.Print("\nEnter a number (default=1): ")
reader := bufio.NewReader(os.Stdin)
numberBuf, overflow, err := reader.ReadLine()
if err != nil {
fmt.Println(err)
break
}
if overflow {
fmt.Println("Input too long")
continue
}
if string(numberBuf) == "" {
break
}
num, err := strconv.Atoi(string(numberBuf))
if err != nil {
fmt.Printf("%s invalid number: %s\n", red("error:"), string(numberBuf))
continue
}
if num < 1 || num > size {
fmt.Printf("%s invalid value: %d is not between %d and %d\n", red("error:"), num, 1, size)
continue
}
qp.SetUseIndex(num - 1)
if config.NoConfirm {
fmt.Println()
break
}
reader := bufio.NewReader(os.Stdin)
numberBuf, overflow, err := reader.ReadLine()
if err != nil {
fmt.Println(err)
break
}
if overflow {
fmt.Println("Input too long")
continue
}
if string(numberBuf) == "" {
break
}
num, err := strconv.Atoi(string(numberBuf))
if err != nil {
fmt.Printf("%s invalid number: %s\n", red("error:"), string(numberBuf))
continue
}
if num < 1 || num > size {
fmt.Printf("%s invalid value: %d is not between %d and %d\n", red("error:"), num, 1, size)
continue
}
qp.SetUseIndex(num - 1)
break
}
}
func logCallback(level alpm.LogLevel, str string) {
switch level {
case alpm.LogWarning:
fmt.Print(bold(yellow(smallArrow)), " ", str)
case alpm.LogError:
fmt.Print(bold(red(smallArrow)), " ", str)
}
}


@ -14,8 +14,7 @@ func removeVCSPackage(pkgs []string) {
updated := false
for _, pkgName := range pkgs {
_, ok := savedInfo[pkgName]
if ok {
if _, ok := savedInfo[pkgName]; ok {
delete(savedInfo, pkgName)
updated = true
}
@ -34,10 +33,10 @@ func cleanDependencies(removeOptional bool) error {
}
if len(hanging) != 0 {
err = cleanRemove(hanging)
return cleanRemove(hanging)
}
return err
return nil
}
// CleanRemove sends a full removal command to pacman with the pkgName slice
@ -49,17 +48,18 @@ func cleanRemove(pkgNames []string) (err error) {
arguments := makeArguments()
arguments.addArg("R")
arguments.addTarget(pkgNames...)
err = passToPacman(arguments)
return err
return show(passToPacman(arguments))
}
func syncClean(parser *arguments) error {
var err error
keepInstalled := false
keepCurrent := false
_, removeAll, _ := parser.getArg("c", "clean")
for _, v := range alpmConf.CleanMethod {
for _, v := range pacmanConf.CleanMethod {
if v == "KeepInstalled" {
keepInstalled = true
} else if v == "KeepCurrent" {
@ -67,9 +67,14 @@ func syncClean(parser *arguments) error {
}
}
err := passToPacman(parser)
if err != nil {
return err
if mode == ModeRepo || mode == ModeAny {
if err = show(passToPacman(parser)); err != nil {
return err
}
}
if !(mode == ModeAUR || mode == ModeAny) {
return nil
}
var question string
@ -79,10 +84,9 @@ func syncClean(parser *arguments) error {
question = "Do you want to remove all other AUR packages from cache?"
}
fmt.Println()
fmt.Printf("Build directory: %s\n", config.BuildDir)
fmt.Printf("\nBuild directory: %s\n", config.BuildDir)
if continueTask(question, "nN") {
if continueTask(question, true) {
err = cleanAUR(keepInstalled, keepCurrent, removeAll)
}
@ -90,11 +94,11 @@ func syncClean(parser *arguments) error {
return err
}
if continueTask("Do you want to remove ALL untracked AUR files?", "nN") {
err = cleanUntracked()
if continueTask("Do you want to remove ALL untracked AUR files?", true) {
return cleanUntracked()
}
return err
return nil
}
func cleanAUR(keepInstalled, keepCurrent, removeAll bool) error {
@ -185,8 +189,7 @@ func cleanUntracked() error {
dir := filepath.Join(config.BuildDir, file.Name())
if shouldUseGit(dir) {
err = passToGit(dir, "clean", "-fx")
if err != nil {
if err = show(passToGit(dir, "clean", "-fx")); err != nil {
return err
}
}

cmd.go

@ -2,13 +2,10 @@ package main
import (
"bufio"
"bytes"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"time"
alpm "github.com/jguer/go-alpm"
)
var cmdArgs = makeArguments()
@ -32,7 +29,7 @@ operations:
New operations:
yay {-Y --yay} [options] [package(s)]
yay {-P --print} [options]
yay {-P --show} [options]
yay {-G --getpkgbuild} [package(s)]
New options:
@ -42,7 +39,8 @@ Permanent configuration options:
--save Causes the following options to be saved back to the
config file when used
--builddir <dir> Directory to use for building AUR Packages
--aururl <url> Set an alternative AUR URL
--builddir <dir> Directory used to download and run PKGBUILDS
--editor <file> Editor to use when editing PKGBUILDs
--editorflags <flags> Pass arguments to editor
--makepkg <file> makepkg command to use
@ -54,8 +52,11 @@ Permanent configuration options:
--gpg <file> gpg command to use
--gpgflags <flags> Pass arguments to gpg
--config <file> pacman.conf file to use
--makepkgconf <file> makepkg.conf file to use
--nomakepkgconf Use the default makepkg.conf
--requestsplitn <n> Max amount of packages to query per AUR request
--completioninterval <n> Time in days to refresh completion cache
--sortby <field> Sort AUR results by a specific field during search
--answerclean <a> Set a predetermined answer for the clean build menu
--answerdiff <a> Set a predetermined answer for the diff menu
@ -73,6 +74,9 @@ Permanent configuration options:
--nodiffmenu Don't show diffs for build files
--noeditmenu Don't edit/view PKGBUILDS
--noupgrademenu Don't show the upgrade menu
--askremovemake Ask to remove makedepends after install
--removemake Remove makedepends after install
--noremovemake Don't remove makedepends after install
--afterclean Remove package sources after successful install
--noafterclean Do not remove package sources after successful build
@ -94,6 +98,10 @@ Permanent configuration options:
--noprovides Just look for packages by pkgname
--pgpfetch Prompt to import PGP keys from PKGBUILDs
--nopgpfetch Don't prompt to import PGP keys
--useask Automatically resolve conflicts using pacman's ask flag
--nouseask Confirm conflicts manually during the install
--combinedupgrade Refresh then perform the repo and AUR upgrade together
--nocombinedupgrade Perform the repo upgrade and AUR upgrade separately
--sudoloop Loop sudo calls in the background to avoid timeout
--nosudoloop Do not loop sudo calls in the background
@ -101,65 +109,25 @@ Permanent configuration options:
--timeupdate Check packages' AUR page for changes during sysupgrade
--notimeupdate Do not check packages' AUR page for changes
Print specific options:
show specific options:
-c --complete Used for completions
-d --defaultconfig Print default yay configuration
-g --config Print current yay configuration
-n --numberupgrades Print number of updates
-g --currentconfig Print current yay configuration
-s --stats Display system package statistics
-u --upgrades Print update list
-w --news Print arch news
Yay specific options:
yay specific options:
-c --clean Remove unneeded dependencies
--gendb Generates development package DB used for updating
getpkgbuild specific options:
-f --force Force download for existing tar packages
If no arguments are provided 'yay -Syu' will be performed
If no operation is provided -Y will be assumed`)
}
func sudoLoopBackground() {
updateSudo()
go sudoLoop()
}
func sudoLoop() {
for {
updateSudo()
time.Sleep(298 * time.Second)
}
}
func updateSudo() {
for {
cmd := exec.Command("sudo", "-v")
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
err := cmd.Run()
if err != nil {
fmt.Println(err)
} else {
break
}
}
}
func handleCmd() (err error) {
for option, value := range cmdArgs.options {
if handleConfig(option, value) {
cmdArgs.delArg(option)
}
}
for option, value := range cmdArgs.globals {
if handleConfig(option, value) {
cmdArgs.delArg(option)
}
}
if shouldSaveConfig {
config.saveConfig()
}
if cmdArgs.existsArg("h", "help") {
err = handleHelp()
return
@ -173,9 +141,9 @@ func handleCmd() (err error) {
case "V", "version":
handleVersion()
case "D", "database":
err = passToPacman(cmdArgs)
err = show(passToPacman(cmdArgs))
case "F", "files":
err = passToPacman(cmdArgs)
err = show(passToPacman(cmdArgs))
case "Q", "query":
err = handleQuery()
case "R", "remove":
@ -183,12 +151,12 @@ func handleCmd() (err error) {
case "S", "sync":
err = handleSync()
case "T", "deptest":
err = passToPacman(cmdArgs)
err = show(passToPacman(cmdArgs))
case "U", "upgrade":
err = passToPacman(cmdArgs)
err = show(passToPacman(cmdArgs))
case "G", "getpkgbuild":
err = handleGetpkgbuild()
case "P", "print":
case "P", "show":
err = handlePrint()
case "Y", "--yay":
err = handleYay()
@ -202,15 +170,10 @@ func handleCmd() (err error) {
}
func handleQuery() error {
var err error
if cmdArgs.existsArg("u", "upgrades") {
err = printUpdateList(cmdArgs)
} else {
err = passToPacman(cmdArgs)
return printUpdateList(cmdArgs)
}
return err
return show(passToPacman(cmdArgs))
}
func handleHelp() error {
@ -218,151 +181,21 @@ func handleHelp() error {
usage()
return nil
}
return passToPacman(cmdArgs)
}
//this function should only set config options
//but currently still uses the switch left over from old code
//eventually this should be refactored out further
//my current plan is to have yay specific operations in its own operator
//e.g. yay -Y --gendb
//e.g yay -Yg
func handleConfig(option, value string) bool {
switch option {
case "save":
shouldSaveConfig = true
case "afterclean":
config.CleanAfter = true
case "noafterclean":
config.CleanAfter = false
case "devel":
config.Devel = true
case "nodevel":
config.Devel = false
case "timeupdate":
config.TimeUpdate = true
case "notimeupdate":
config.TimeUpdate = false
case "topdown":
config.SortMode = TopDown
case "bottomup":
config.SortMode = BottomUp
case "sortby":
config.SortBy = value
case "noconfirm":
config.NoConfirm = true
case "redownload":
config.ReDownload = "yes"
case "redownloadall":
config.ReDownload = "all"
case "noredownload":
config.ReDownload = "no"
case "rebuild":
config.ReBuild = "yes"
case "rebuildall":
config.ReBuild = "all"
case "rebuildtree":
config.ReBuild = "tree"
case "norebuild":
config.ReBuild = "no"
case "answerclean":
config.AnswerClean = value
case "noanswerclean":
config.AnswerClean = ""
case "answerdiff":
config.AnswerDiff = value
case "noanswerdiff":
config.AnswerDiff = ""
case "answeredit":
config.AnswerEdit = value
case "noansweredit":
config.AnswerEdit = ""
case "answerupgrade":
config.AnswerUpgrade = value
case "noanswerupgrade":
config.AnswerUpgrade = ""
case "gitclone":
config.GitClone = true
case "nogitclone":
config.GitClone = false
case "gpgflags":
config.GpgFlags = value
case "mflags":
config.MFlags = value
case "gitflags":
config.GitFlags = value
case "builddir":
config.BuildDir = value
case "editor":
config.Editor = value
case "editorflags":
config.EditorFlags = value
case "makepkg":
config.MakepkgBin = value
case "pacman":
config.PacmanBin = value
case "tar":
config.TarBin = value
case "git":
config.GitBin = value
case "gpg":
config.GpgBin = value
case "requestsplitn":
n, err := strconv.Atoi(value)
if err == nil && n > 0 {
config.RequestSplitN = n
}
case "sudoloop":
config.SudoLoop = true
case "nosudoloop":
config.SudoLoop = false
case "provides":
config.Provides = true
case "noprovides":
config.Provides = false
case "pgpfetch":
config.PGPFetch = true
case "nopgpfetch":
config.PGPFetch = false
case "upgrademenu":
config.UpgradeMenu = true
case "noupgrademenu":
config.UpgradeMenu = false
case "cleanmenu":
config.CleanMenu = true
case "nocleanmenu":
config.CleanMenu = false
case "diffmenu":
config.DiffMenu = true
case "nodiffmenu":
config.DiffMenu = false
case "editmenu":
config.EditMenu = true
case "noeditmenu":
config.EditMenu = false
case "a", "aur":
mode = ModeAUR
case "repo":
mode = ModeRepo
default:
return false
}
return true
return show(passToPacman(cmdArgs))
}
func handleVersion() {
fmt.Printf("yay v%s\n", version)
fmt.Printf("yay v%s - libalpm v%s\n", version, alpm.Version())
}
func handlePrint() (err error) {
switch {
case cmdArgs.existsArg("d", "defaultconfig"):
var tmpConfig Configuration
defaultSettings(&tmpConfig)
tmpConfig.defaultSettings()
tmpConfig.expandEnv()
fmt.Printf("%v", tmpConfig)
case cmdArgs.existsArg("g", "config"):
case cmdArgs.existsArg("g", "currentconfig"):
fmt.Printf("%v", config)
case cmdArgs.existsArg("n", "numberupgrades"):
err = printNumberOfUpdates()
@ -370,115 +203,105 @@ func handlePrint() (err error) {
err = printUpdateList(cmdArgs)
case cmdArgs.existsArg("w", "news"):
err = printNewsFeed()
case cmdArgs.existsDouble("c", "complete"):
complete(true)
case cmdArgs.existsArg("c", "complete"):
switch {
case cmdArgs.existsArg("f", "fish"):
complete("fish")
default:
complete("sh")
}
complete(false)
case cmdArgs.existsArg("s", "stats"):
err = localStatistics()
default:
err = nil
}
return err
}
func handleYay() (err error) {
func handleYay() error {
//_, options, targets := cmdArgs.formatArgs()
if cmdArgs.existsArg("gendb") {
err = createDevelDB()
} else if cmdArgs.existsDouble("c") {
err = cleanDependencies(true)
} else if cmdArgs.existsArg("c", "clean") {
err = cleanDependencies(false)
} else if len(cmdArgs.targets) > 0 {
err = handleYogurt()
return createDevelDB()
}
return
if cmdArgs.existsDouble("c") {
return cleanDependencies(true)
}
if cmdArgs.existsArg("c", "clean") {
return cleanDependencies(false)
}
if len(cmdArgs.targets) > 0 {
return handleYogurt()
}
return nil
}
func handleGetpkgbuild() (err error) {
err = getPkgbuilds(cmdArgs.targets)
return
func handleGetpkgbuild() error {
return getPkgbuilds(cmdArgs.targets)
}
func handleYogurt() (err error) {
options := cmdArgs.formatArgs()
func handleYogurt() error {
config.SearchMode = NumberMenu
err = numberMenu(cmdArgs.targets, options)
return
return numberMenu(cmdArgs.targets)
}
func handleSync() (err error) {
func handleSync() error {
targets := cmdArgs.targets
if cmdArgs.existsArg("y", "refresh") {
arguments := cmdArgs.copy()
cmdArgs.delArg("y", "refresh")
arguments.delArg("u", "sysupgrade")
arguments.delArg("s", "search")
arguments.delArg("i", "info")
arguments.delArg("l", "list")
arguments.clearTargets()
err = passToPacman(arguments)
if err != nil {
return
}
}
if cmdArgs.existsArg("s", "search") {
if cmdArgs.existsArg("q", "quiet") {
config.SearchMode = Minimal
} else {
config.SearchMode = Detailed
}
err = syncSearch(targets)
} else if cmdArgs.existsArg("c", "clean") {
err = syncClean(cmdArgs)
} else if cmdArgs.existsArg("l", "list") {
err = passToPacman(cmdArgs)
} else if cmdArgs.existsArg("c", "clean") {
err = passToPacman(cmdArgs)
} else if cmdArgs.existsArg("i", "info") {
err = syncInfo(targets)
} else if cmdArgs.existsArg("u", "sysupgrade") {
err = install(cmdArgs)
} else if len(cmdArgs.targets) > 0 {
err = install(cmdArgs)
return syncSearch(targets)
}
return
if cmdArgs.existsArg("p", "print", "print-format") {
return show(passToPacman(cmdArgs))
}
if cmdArgs.existsArg("c", "clean") {
return syncClean(cmdArgs)
}
if cmdArgs.existsArg("l", "list") {
return show(passToPacman(cmdArgs))
}
if cmdArgs.existsArg("g", "groups") {
return show(passToPacman(cmdArgs))
}
if cmdArgs.existsArg("i", "info") {
return syncInfo(targets)
}
if cmdArgs.existsArg("u", "sysupgrade") {
return install(cmdArgs)
}
if len(cmdArgs.targets) > 0 {
return install(cmdArgs)
}
if cmdArgs.existsArg("y", "refresh") {
return show(passToPacman(cmdArgs))
}
return nil
}
func handleRemove() (err error) {
func handleRemove() error {
removeVCSPackage(cmdArgs.targets)
err = passToPacman(cmdArgs)
return
return show(passToPacman(cmdArgs))
}
// NumberMenu presents a CLI for selecting packages to install.
func numberMenu(pkgS []string, flags []string) (err error) {
func numberMenu(pkgS []string) (err error) {
var (
aurErr, repoErr error
aq aurQuery
pq repoQuery
lenaq, lenpq int
)
pkgS = removeInvalidTargets(pkgS)
var aurErr error
var repoErr error
var aq aurQuery
var pq repoQuery
var lenaq int
var lenpq int
if mode == ModeAUR || mode == ModeAny {
aq, aurErr = narrowSearch(pkgS, true)
lenaq = len(aq)
}
if mode == ModeRepo || mode == ModeAny {
pq, lenpq, repoErr = queryRepo(pkgS)
pq, repoErr = queryRepo(pkgS)
lenpq = len(pq)
if repoErr != nil {
return repoErr
}
@ -513,12 +336,11 @@ func numberMenu(pkgS []string, flags []string) (err error) {
fmt.Print(bold(green(arrow + " ")))
reader := bufio.NewReader(os.Stdin)
numberBuf, overflow, err := reader.ReadLine()
numberBuf, overflow, err := reader.ReadLine()
if err != nil {
return err
}
if overflow {
return fmt.Errorf("Input too long")
}
@ -534,10 +356,7 @@ func numberMenu(pkgS []string, flags []string) (err error) {
target = i + 1
}
if isInclude && include.get(target) {
arguments.addTarget(pkg.DB().Name() + "/" + pkg.Name())
}
if !isInclude && !exclude.get(target) {
if (isInclude && include.get(target)) || (!isInclude && !exclude.get(target)) {
arguments.addTarget(pkg.DB().Name() + "/" + pkg.Name())
}
}
@ -548,14 +367,16 @@ func numberMenu(pkgS []string, flags []string) (err error) {
target = i + 1 + len(pq)
}
if isInclude && include.get(target) {
arguments.addTarget("aur/" + pkg.Name)
}
if !isInclude && !exclude.get(target) {
if (isInclude && include.get(target)) || (!isInclude && !exclude.get(target)) {
arguments.addTarget("aur/" + pkg.Name)
}
}
if len(arguments.targets) == 0 {
fmt.Println("There is nothing to do")
return nil
}
if config.SudoLoop {
sudoLoopBackground()
}
@ -564,143 +385,3 @@ func numberMenu(pkgS []string, flags []string) (err error) {
return err
}
// passToPacman outsources execution to pacman binary without modifications.
func passToPacman(args *arguments) error {
var cmd *exec.Cmd
argArr := make([]string, 0)
if args.needRoot() {
argArr = append(argArr, "sudo")
}
argArr = append(argArr, config.PacmanBin)
argArr = append(argArr, cmdArgs.formatGlobals()...)
argArr = append(argArr, args.formatArgs()...)
if config.NoConfirm {
argArr = append(argArr, "--noconfirm")
}
argArr = append(argArr, "--")
argArr = append(argArr, args.targets...)
cmd = exec.Command(argArr[0], argArr[1:]...)
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("")
}
return nil
}
//passToPacman but return the output instead of showing the user
func passToPacmanCapture(args *arguments) (string, string, error) {
var outbuf, errbuf bytes.Buffer
var cmd *exec.Cmd
argArr := make([]string, 0)
if args.needRoot() {
argArr = append(argArr, "sudo")
}
argArr = append(argArr, config.PacmanBin)
argArr = append(argArr, cmdArgs.formatGlobals()...)
argArr = append(argArr, args.formatArgs()...)
if config.NoConfirm {
argArr = append(argArr, "--noconfirm")
}
argArr = append(argArr, "--")
argArr = append(argArr, args.targets...)
cmd = exec.Command(argArr[0], argArr[1:]...)
cmd.Stdout = &outbuf
cmd.Stderr = &errbuf
err := cmd.Run()
stdout := outbuf.String()
stderr := errbuf.String()
return stdout, stderr, err
}
// passToMakepkg outsources execution to makepkg binary without modifications.
func passToMakepkg(dir string, args ...string) (err error) {
if config.NoConfirm {
args = append(args)
}
mflags := strings.Fields(config.MFlags)
args = append(args, mflags...)
cmd := exec.Command(config.MakepkgBin, args...)
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
cmd.Dir = dir
err = cmd.Run()
if err == nil {
_ = saveVCSInfo()
}
return
}
func passToMakepkgCapture(dir string, args ...string) (string, string, error) {
var outbuf, errbuf bytes.Buffer
if config.NoConfirm {
args = append(args)
}
mflags := strings.Fields(config.MFlags)
args = append(args, mflags...)
cmd := exec.Command(config.MakepkgBin, args...)
cmd.Dir = dir
cmd.Stdout = &outbuf
cmd.Stderr = &errbuf
err := cmd.Run()
stdout := outbuf.String()
stderr := errbuf.String()
if err == nil {
_ = saveVCSInfo()
}
return stdout, stderr, err
}
func passToGit(dir string, _args ...string) (err error) {
gitflags := strings.Fields(config.GitFlags)
args := []string{"-C", dir}
args = append(args, gitflags...)
args = append(args, _args...)
cmd := exec.Command(config.GitBin, args...)
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
err = cmd.Run()
return
}
func passToGitCapture(dir string, _args ...string) (string, string, error) {
var outbuf, errbuf bytes.Buffer
gitflags := strings.Fields(config.GitFlags)
args := []string{"-C", dir}
args = append(args, gitflags...)
args = append(args, _args...)
cmd := exec.Command(config.GitBin, args...)
cmd.Dir = dir
cmd.Stdout = &outbuf
cmd.Stderr = &errbuf
err := cmd.Run()
stdout := outbuf.String()
stderr := errbuf.String()
return stdout, stderr, err
}


@ -2,7 +2,6 @@ package main
import (
"bufio"
"fmt"
"io"
"net/http"
"os"
@ -13,8 +12,8 @@ import (
)
//CreateAURList creates a new completion file
func createAURList(out *os.File, shell string) (err error) {
resp, err := http.Get("https://aur.archlinux.org/packages.gz")
func createAURList(out *os.File) (err error) {
resp, err := http.Get(config.AURURL + "/packages.gz")
if err != nil {
return err
}
@ -24,22 +23,15 @@ func createAURList(out *os.File, shell string) (err error) {
scanner.Scan()
for scanner.Scan() {
fmt.Print(scanner.Text())
out.WriteString(scanner.Text())
if shell == "fish" {
fmt.Print("\tAUR\n")
out.WriteString("\tAUR\n")
} else {
fmt.Print("\n")
out.WriteString("\n")
}
out.WriteString("\tAUR\n")
}
return nil
}
//CreatePackageList appends Repo packages to completion cache
func createRepoList(out *os.File, shell string) (err error) {
func createRepoList(out *os.File) (err error) {
dbList, err := alpmHandle.SyncDbs()
if err != nil {
return
@ -47,15 +39,8 @@ func createRepoList(out *os.File, shell string) (err error) {
_ = dbList.ForEach(func(db alpm.Db) error {
_ = db.PkgCache().ForEach(func(pkg alpm.Package) error {
fmt.Print(pkg.Name())
out.WriteString(pkg.Name())
if shell == "fish" {
fmt.Print("\t" + pkg.DB().Name() + "\n")
out.WriteString("\t" + pkg.DB().Name() + "\n")
} else {
fmt.Print("\n")
out.WriteString("\n")
}
out.WriteString("\t" + pkg.DB().Name() + "\n")
return nil
})
return nil
@ -63,33 +48,38 @@ func createRepoList(out *os.File, shell string) (err error) {
return nil
}
// Complete provides completion info for shells
func complete(shell string) error {
var path string
if shell == "fish" {
path = filepath.Join(cacheHome, "aur_fish"+".cache")
} else {
path = filepath.Join(cacheHome, "aur_sh"+".cache")
}
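// updateCompletion refreshes the completion cache when it is missing, older
// than CompletionInterval days, or when force is set.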
func updateCompletion(force bool) error {
path := filepath.Join(cacheHome, "completion.cache")
info, err := os.Stat(path)
if os.IsNotExist(err) || time.Since(info.ModTime()).Hours() > 48 {
if os.IsNotExist(err) || (config.CompletionInterval != -1 && time.Since(info.ModTime()).Hours() >= float64(config.CompletionInterval*24)) || force {
os.MkdirAll(filepath.Dir(path), 0755)
out, errf := os.Create(path)
if errf != nil {
return errf
}
if createAURList(out, shell) != nil {
if createAURList(out) != nil {
defer os.Remove(path)
}
erra := createRepoList(out, shell)
erra := createRepoList(out)
out.Close()
return erra
}
return nil
}
// Complete provides completion info for shells
func complete(force bool) error {
path := filepath.Join(cacheHome, "completion.cache")
err := updateCompletion(force)
if err != nil {
return err
}
in, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return err


@ -1,4 +1,3 @@
# vim:fdm=marker foldlevel=0 tabstop=2 shiftwidth=2 filetype=bash
# This file is in the public domain.
_arch_compgen() {
@ -28,19 +27,6 @@ _arch_incomp() {
local r="\s-(-${1#* }\s|\w*${1% *})"; [[ $COMP_LINE =~ $r ]]
}
_pacman_keyids() {
\pacman-key --list-keys 2>/dev/null | awk '
$1 == "pub" {
# key id
split($2, a, "/"); print a[2]
}
$1 == "uid" {
# email
if (match($NF, /<[^>]+>/))
print substr($NF, RSTART + 1, RLENGTH - 2)
}'
}
_pacman_pkg() {
_arch_compgen "$(
if [[ $2 ]]; then
@ -51,36 +37,48 @@ _pacman_pkg() {
)"
}
_yay_pkg() {
[ -z "$cur" ] && return
_arch_compgen "$(yay -Pc)"
}
_pacman_repo_list() {
_arch_compgen "$(pacman-conf --repo-list)"
}
_yay() {
local common core cur database prev query remove sync upgrade yays print o
local common core cur database files prev query remove sync upgrade o
local yays show getpkgbuild
COMPREPLY=()
_get_comp_words_by_ref cur prev
database=('asdeps asexplicit')
files=('list machinereadable owns search refresh regex' 'l o s x y')
query=('changelog check deps explicit file foreign groups info list owns
search unrequired upgrades' 'c e g i k l m o p s t u')
query=('changelog check deps explicit file foreign groups info list native owns
search unrequired upgrades' 'c e g i k l m n o p s t u')
remove=('cascade dbonly nodeps assume-installed nosave print recursive unneeded' 'c n p s u')
sync=('asdeps asexplicit clean dbonly downloadonly force groups ignore ignoregroup
info list needed nodeps assume-installed print refresh recursive search sysupgrade'
'c g i l p s u w y')
upgrade=('asdeps asexplicit force needed nodeps assume-installed print recursive' 'p')
yays=('clean gendb' 'c')
print=('complete defaultconfig config numberupgrades stats upgrades news' 'c d g n
s u w')
common=('arch cachedir color config confirm dbpath debug gpgdir help hookdir logfile
noconfirm noprogressbar noscriptlet quiet save mflags buildir editor
noconfirm noprogressbar noscriptlet quiet root verbose
#yay stuff
makepkg pacman tar git gpg gpgflags config requestsplitn sudoloop nosudoloop
redownload noredownload redownloadall rebuild rebuildall rebuildtree norebuild
sortby answerclean answerdiff answeredit answerupgrade noanswerclean noanswerdiff
noansweredit noanswerupgrade cleanmenu diffmenu editmenu upgrademenu
nocleanmenu nodiffmenu noupgrademenu provides noprovides pgpfetch nopgpfetch
root verbose aur repo' 'a b d h q r v')
useask nouseask combinedupgrade nocombinedupgrade aur repo makepkgconf
nomakepkgconf askremovemake removemake noremovemake completioninterval aururl'
'b d h q r v')
core=('database files help query remove sync upgrade version' 'D F Q R S U V h')
for o in 'D database' 'F files' 'Q query' 'R remove' 'S sync' 'U upgrade' 'Y yays' 'P print'; do
##yay stuff
yays=('clean gendb' 'c')
show=('complete defaultconfig currentconfig stats news' 'c d g s w')
getpkgbuild=('force' 'f')
for o in 'D database' 'F files' 'Q query' 'R remove' 'S sync' 'U upgrade' 'Y yays' 'P show' 'G getpkgbuild'; do
_arch_incomp "$o" && break
done
@ -103,8 +101,8 @@ _yay() {
_pacman_pkg Qq;;
S)
{ _arch_incomp 'g groups' && _pacman_pkg Sg; } ||
{ _arch_incomp 'l list' && _arch_compgen "$(yay -Pc | \sort -u)"; } ||
_arch_compgen "$(yay -Pc )";;
{ _arch_incomp 'l list' && _pacman_repo_list; } ||
_yay_pkg;;
U)
_pacman_file;;
esac


@ -8,14 +8,14 @@ complete -c $progname -f
set -l listinstalled "(pacman -Q | string replace ' ' \t)"
# This might be an issue if another package manager is also installed (e.g. for containers)
set -l listall "(yay -Pcf)"
set -l listall "(yay -Pc)"
set -l listrepos "(__fish_print_pacman_repos)"
set -l listgroups "(pacman -Sg)\t'Package Group'"
set -l listpacman "(__fish_print_packages)"
set -l noopt 'not __fish_contains_opt -s Y -s G -s V -s P -s S -s D -s Q -s R -s U -s T -s F database query sync remove upgrade deptest files'
set -l database '__fish_contains_opt -s D database'
set -l getpkgbuild '__fish_contains_opt -s G getpkgbuild'
set -l print '__fish_contains_opt -s P print'
set -l show '__fish_contains_opt -s P show'
set -l query '__fish_contains_opt -s Q query'
set -l remove '__fish_contains_opt -s R remove'
set -l sync '__fish_contains_opt -s S sync'
@ -31,7 +31,7 @@ set -l yayspecific '__fish_contains_opt -s Y yay'
complete -c $progname -s D -f -l database -n $noopt -d 'Modify the package database'
complete -c $progname -s F -f -l files -n $noopt -d 'Query the files database'
complete -c $progname -s G -f -l getpkgbuild -n $noopt -d 'Get PKGBUILD from ABS or AUR'
complete -c $progname -s P -f -l print -n $noopt -d 'Print information'
complete -c $progname -s P -f -l show -n $noopt -d 'Print information'
complete -c $progname -s Q -f -l query -n $noopt -d 'Query the package database'
complete -c $progname -s R -f -l remove -n $noopt -d 'Remove packages from the system'
complete -c $progname -s S -f -l sync -n $noopt -d 'Synchronize packages'
@ -46,6 +46,7 @@ complete -c $progname -s h -f -l help -n $noopt -d 'Display help'
complete -c $progname -n "not $noopt" -s a -l aur -d 'Assume targets are from the AUR'
complete -c $progname -n "not $noopt" -l repo -d 'Assume targets are from the repositories'
complete -c $progname -n "not $noopt" -s b -l aururl -d 'Set an alternative AUR URL' -f
complete -c $progname -n "not $noopt" -s b -l dbpath -d 'Alternative database location' -xa '(__fish_complete_directories)'
complete -c $progname -n "not $noopt" -s r -l root -d 'Alternative installation root'
complete -c $progname -n "not $noopt" -s v -l verbose -d 'Output more status messages'
@ -110,22 +111,39 @@ complete -c $progname -n "not $noopt" -l noeditmenu -d 'Do not edit/view PKGBUIL
complete -c $progname -n "not $noopt" -l noupgrademenu -d 'Do not show the upgrade menu' -f
complete -c $progname -n "not $noopt" -l provides -d 'Look for matching provders when searching for packages'
complete -c $progname -n "not $noopt" -l provides -d 'Look for matching providers when searching for packages'
complete -c $progname -n "not $noopt" -l noprovides -d 'Just look for packages by pkgname'
complete -c $progname -n "not $noopt" -l pgpfetch -d 'Prompt to import PGP keys from PKGBUILDs'
complete -c $progname -n "not $noopt" -l nopgpfetch -d 'Do not prompt to import PGP keys'
# Post V7.887
complete -c $progname -n "not $noopt" -l useask -d 'Automatically resolve conflicts using pacmans ask flag'
complete -c $progname -n "not $noopt" -l nouseask -d 'Confirm conflicts manually during the install'
complete -c $progname -n "not $noopt" -l combinedupgrade -d 'Refresh then perform the repo and AUR upgrade together'
complete -c $progname -n "not $noopt" -l nocombinedupgrade -d 'Perform the repo upgrade and AUR upgrade separately'
# Post V8.976
complete -c $progname -n "not $noopt" -l nomakepkgconf -d 'Use default makepkg.conf'
complete -c $progname -n "not $noopt" -l makepkgconf -d 'Use custom makepkg.conf location'
complete -c $progname -n "not $noopt" -l removemake -d 'Remove make deps after install'
complete -c $progname -n "not $noopt" -l askremovemake -d 'Ask to remove make deps after install'
complete -c $progname -n "not $noopt" -l noremovemake -d 'Do not remove make deps after install'
complete -c $progname -n "not $noopt" -l completioninterval -d 'Refresh interval for completion cache'
# Yay options
complete -c $progname -n $yayspecific -s c -l clean -d 'Remove unneeded dependencies' -f
complete -c $progname -n $yayspecific -l gendb -d 'Generate development package DB' -f
# Print options
complete -c $progname -n $print -s d -l defaultconfig -d 'Print current yay configuration' -f
complete -c $progname -n $print -s n -l numberupgrades -d 'Print number of updates' -f
complete -c $progname -n $print -s s -l stats -d 'Display system package statistics' -f
complete -c $progname -n $print -s u -l upgrades -d 'Print update list' -f
complete -c $progname -n $print -s w -l news -d 'Print arch news'
complete -c $progname -n $print -s q -l quiet -d 'Do not print news description'
# Show options
complete -c $progname -n $show -s d -l defaultconfig -d 'Print default yay configuration' -f
complete -c $progname -n $show -s n -l numberupgrades -d 'Print number of updates' -f
complete -c $progname -n $show -s s -l stats -d 'Display system package statistics' -f
complete -c $progname -n $show -s u -l upgrades -d 'Print update list' -f
complete -c $progname -n $show -s w -l news -d 'Print arch news'
complete -c $progname -n $show -s q -l quiet -d 'Do not print news description'
# Getpkgbuild options
complete -c $progname -n $getpkgbuild -s f -l force -d 'Force download for existing tar packages' -f
# Transaction options (sync, remove, upgrade)
for condition in sync remove upgrade

File diff suppressed because it is too large

config.go

@ -9,6 +9,7 @@ import (
"os/exec"
"strings"
pacmanconf "github.com/Morganamilo/go-pacmanconf"
alpm "github.com/jguer/go-alpm"
)
@ -35,43 +36,49 @@ const (
// Configuration stores yay's config.
type Configuration struct {
BuildDir string `json:"buildDir"`
Editor string `json:"editor"`
EditorFlags string `json:"editorflags"`
MakepkgBin string `json:"makepkgbin"`
PacmanBin string `json:"pacmanbin"`
PacmanConf string `json:"pacmanconf"`
TarBin string `json:"tarbin"`
ReDownload string `json:"redownload"`
ReBuild string `json:"rebuild"`
AnswerClean string `json:"answerclean"`
AnswerDiff string `json:"answerdiff"`
AnswerEdit string `json:"answeredit"`
AnswerUpgrade string `json:"answerupgrade"`
GitBin string `json:"gitbin"`
GpgBin string `json:"gpgbin"`
GpgFlags string `json:"gpgflags"`
MFlags string `json:"mflags"`
SortBy string `json:"sortby"`
GitFlags string `json:"gitflags"`
RequestSplitN int `json:"requestsplitn"`
SearchMode int `json:"-"`
SortMode int `json:"sortmode"`
SudoLoop bool `json:"sudoloop"`
TimeUpdate bool `json:"timeupdate"`
NoConfirm bool `json:"-"`
Devel bool `json:"devel"`
CleanAfter bool `json:"cleanAfter"`
GitClone bool `json:"gitclone"`
Provides bool `json:"provides"`
PGPFetch bool `json:"pgpfetch"`
UpgradeMenu bool `json:"upgrademenu"`
CleanMenu bool `json:"cleanmenu"`
DiffMenu bool `json:"diffmenu"`
EditMenu bool `json:"editmenu"`
AURURL string `json:"aururl"`
BuildDir string `json:"buildDir"`
Editor string `json:"editor"`
EditorFlags string `json:"editorflags"`
MakepkgBin string `json:"makepkgbin"`
MakepkgConf string `json:"makepkgconf"`
PacmanBin string `json:"pacmanbin"`
PacmanConf string `json:"pacmanconf"`
TarBin string `json:"tarbin"`
ReDownload string `json:"redownload"`
ReBuild string `json:"rebuild"`
AnswerClean string `json:"answerclean"`
AnswerDiff string `json:"answerdiff"`
AnswerEdit string `json:"answeredit"`
AnswerUpgrade string `json:"answerupgrade"`
GitBin string `json:"gitbin"`
GpgBin string `json:"gpgbin"`
GpgFlags string `json:"gpgflags"`
MFlags string `json:"mflags"`
SortBy string `json:"sortby"`
GitFlags string `json:"gitflags"`
RemoveMake string `json:"removemake"`
RequestSplitN int `json:"requestsplitn"`
SearchMode int `json:"-"`
SortMode int `json:"sortmode"`
CompletionInterval int `json:"completionrefreshtime"`
SudoLoop bool `json:"sudoloop"`
TimeUpdate bool `json:"timeupdate"`
NoConfirm bool `json:"-"`
Devel bool `json:"devel"`
CleanAfter bool `json:"cleanAfter"`
GitClone bool `json:"gitclone"`
Provides bool `json:"provides"`
PGPFetch bool `json:"pgpfetch"`
UpgradeMenu bool `json:"upgrademenu"`
CleanMenu bool `json:"cleanmenu"`
DiffMenu bool `json:"diffmenu"`
EditMenu bool `json:"editmenu"`
CombinedUpgrade bool `json:"combinedupgrade"`
UseAsk bool `json:"useask"`
}
var version = "5.688"
var version = "8.1115"
// configFileName holds the name of the config file.
const configFileName string = "config.json"
@ -79,9 +86,6 @@ const configFileName string = "config.json"
// vcsFileName holds the name of the vcs file.
const vcsFileName string = "vcs.json"
// baseURL givers the AUR default address.
const baseURL string = "https://aur.archlinux.org"
// useColor enables/disables colored printing
var useColor bool
@ -107,25 +111,13 @@ var shouldSaveConfig bool
var config Configuration
// AlpmConf holds the current config values for pacman.
var alpmConf alpm.PacmanConfig
var pacmanConf *pacmanconf.Config
// AlpmHandle is the alpm handle used by yay.
var alpmHandle *alpm.Handle
// Mode is used to restrict yay to AUR or repo only modes
var mode targetMode = ModeAny
func readAlpmConfig(pacmanconf string) (conf alpm.PacmanConfig, err error) {
file, err := os.Open(pacmanconf)
if err != nil {
return
}
conf, err = alpm.ParseConfig(file)
if err != nil {
return
}
return
}
var mode = ModeAny
// SaveConfig writes yay config to file.
func (config *Configuration) saveConfig() error {
@ -143,13 +135,20 @@ func (config *Configuration) saveConfig() error {
return err
}
func defaultSettings(config *Configuration) {
config.BuildDir = cacheHome
func (config *Configuration) defaultSettings() {
buildDir := "$HOME/.cache/yay"
if os.Getenv("XDG_CACHE_HOME") != "" {
buildDir = "$XDG_CACHE_HOME/yay"
}
config.AURURL = "https://aur.archlinux.org"
config.BuildDir = buildDir
config.CleanAfter = false
config.Editor = ""
config.EditorFlags = ""
config.Devel = false
config.MakepkgBin = "makepkg"
config.MakepkgConf = ""
config.NoConfirm = false
config.PacmanBin = "pacman"
config.PGPFetch = true
@ -158,6 +157,7 @@ func defaultSettings(config *Configuration) {
config.MFlags = ""
config.GitFlags = ""
config.SortMode = BottomUp
config.CompletionInterval = 7
config.SortBy = "votes"
config.SudoLoop = false
config.TarBin = "bsdtar"
@ -171,12 +171,40 @@ func defaultSettings(config *Configuration) {
config.AnswerDiff = ""
config.AnswerEdit = ""
config.AnswerUpgrade = ""
config.RemoveMake = "ask"
config.GitClone = true
config.Provides = true
config.UpgradeMenu = true
config.CleanMenu = true
config.DiffMenu = true
config.EditMenu = false
config.UseAsk = false
config.CombinedUpgrade = false
}
func (config *Configuration) expandEnv() {
config.AURURL = os.ExpandEnv(config.AURURL)
config.BuildDir = os.ExpandEnv(config.BuildDir)
config.Editor = os.ExpandEnv(config.Editor)
config.EditorFlags = os.ExpandEnv(config.EditorFlags)
config.MakepkgBin = os.ExpandEnv(config.MakepkgBin)
config.MakepkgConf = os.ExpandEnv(config.MakepkgConf)
config.PacmanBin = os.ExpandEnv(config.PacmanBin)
config.PacmanConf = os.ExpandEnv(config.PacmanConf)
config.GpgFlags = os.ExpandEnv(config.GpgFlags)
config.MFlags = os.ExpandEnv(config.MFlags)
config.GitFlags = os.ExpandEnv(config.GitFlags)
config.SortBy = os.ExpandEnv(config.SortBy)
config.TarBin = os.ExpandEnv(config.TarBin)
config.GitBin = os.ExpandEnv(config.GitBin)
config.GpgBin = os.ExpandEnv(config.GpgBin)
config.ReDownload = os.ExpandEnv(config.ReDownload)
config.ReBuild = os.ExpandEnv(config.ReBuild)
config.AnswerClean = os.ExpandEnv(config.AnswerClean)
config.AnswerDiff = os.ExpandEnv(config.AnswerDiff)
config.AnswerEdit = os.ExpandEnv(config.AnswerEdit)
config.AnswerUpgrade = os.ExpandEnv(config.AnswerUpgrade)
config.RemoveMake = os.ExpandEnv(config.RemoveMake)
}
// Editor returns the preferred system editor.
@ -235,31 +263,33 @@ func editor() (string, []string) {
// ContinueTask prompts if user wants to continue task.
//If NoConfirm is set the action will continue without user input.
func continueTask(s string, def string) (cont bool) {
func continueTask(s string, cont bool) bool {
if config.NoConfirm {
return true
}
var postFix string
if def == "nN" {
postFix = " [Y/n] "
} else {
postFix = " [y/N] "
return cont
}
var response string
var postFix string
yes := "yes"
no := "no"
y := string([]rune(yes)[0])
n := string([]rune(no)[0])
if cont {
postFix = fmt.Sprintf(" [%s/%s] ", strings.ToUpper(y), n)
} else {
postFix = fmt.Sprintf(" [%s/%s] ", y, strings.ToUpper(n))
}
fmt.Print(bold(green(arrow)+" "+s), bold(postFix))
n, err := fmt.Scanln(&response)
if err != nil || n == 0 {
return true
len, err := fmt.Scanln(&response)
if err != nil || len == 0 {
return cont
}
if response == string(def[0]) || response == string(def[1]) {
return false
}
return true
response = strings.ToLower(response)
return response == yes || response == y
}
func getInput(defaultValue string) (string, error) {
@ -291,3 +321,112 @@ func (config Configuration) String() string {
}
return buf.String()
}
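// toUsage converts the Usage values from pacman.conf (Sync, Search, Install,
// Upgrade, All) into an alpm.Usage bitmask; an empty list means UsageAll.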
func toUsage(usages []string) alpm.Usage {
if len(usages) == 0 {
return alpm.UsageAll
}
var ret alpm.Usage = 0
for _, usage := range usages {
switch usage {
case "Sync":
ret |= alpm.UsageSync
case "Search":
ret |= alpm.UsageSearch
case "Install":
ret |= alpm.UsageInstall
case "Upgrade":
ret |= alpm.UsageUpgrade
case "All":
ret |= alpm.UsageAll
}
}
return ret
}
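// configureAlpm applies the parsed pacman.conf settings (repos, cache and
// hook dirs, ignore lists, signature levels, etc.) to the alpm handle.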
func configureAlpm(conf *pacmanconf.Config) error {
var err error
// TODO: set SigLevel
sigLevel := alpm.SigPackage | alpm.SigPackageOptional | alpm.SigDatabase | alpm.SigDatabaseOptional
localFileSigLevel := alpm.SigUseDefault
remoteFileSigLevel := alpm.SigUseDefault
for _, repo := range pacmanConf.Repos {
// TODO: set SigLevel
db, err := alpmHandle.RegisterSyncDb(repo.Name, sigLevel)
if err != nil {
return err
}
db.SetServers(repo.Servers)
db.SetUsage(toUsage(repo.Usage))
}
if err = alpmHandle.SetCacheDirs(pacmanConf.CacheDir...); err != nil {
return err
}
// add hook directories 1-by-1 to avoid overwriting the system directory
for _, dir := range pacmanConf.HookDir {
if err = alpmHandle.AddHookDir(dir); err != nil {
return err
}
}
if err = alpmHandle.SetGPGDir(pacmanConf.GPGDir); err != nil {
return err
}
if err = alpmHandle.SetLogFile(pacmanConf.LogFile); err != nil {
return err
}
if err = alpmHandle.SetIgnorePkgs(pacmanConf.IgnorePkg...); err != nil {
return err
}
if err = alpmHandle.SetIgnoreGroups(pacmanConf.IgnoreGroup...); err != nil {
return err
}
if err = alpmHandle.SetArch(pacmanConf.Architecture); err != nil {
return err
}
if err = alpmHandle.SetNoUpgrades(pacmanConf.NoUpgrade...); err != nil {
return err
}
if err = alpmHandle.SetNoExtracts(pacmanConf.NoExtract...); err != nil {
return err
}
if err = alpmHandle.SetDefaultSigLevel(sigLevel); err != nil {
return err
}
if err = alpmHandle.SetLocalFileSigLevel(localFileSigLevel); err != nil {
return err
}
if err = alpmHandle.SetRemoteFileSigLevel(remoteFileSigLevel); err != nil {
return err
}
if err = alpmHandle.SetDeltaRatio(pacmanConf.UseDelta); err != nil {
return err
}
if err = alpmHandle.SetUseSyslog(pacmanConf.UseSyslog); err != nil {
return err
}
if err = alpmHandle.SetCheckSpace(pacmanConf.CheckSpace); err != nil {
return err
}
return nil
}

config_test.go (new file)

@ -0,0 +1,61 @@
package main
import (
"reflect"
"testing"
)
func expect(t *testing.T, field string, a interface{}, b interface{}, err error) {
if err != nil {
t.Error(err)
} else if !reflect.DeepEqual(a, b) {
t.Errorf("%s expected: %s got %s", field, a, b)
}
}
func TestConfig(t *testing.T) {
config.PacmanConf = "/home/morganamilo/git/yay/testdata/pacman.conf"
err := initAlpm()
if err != nil {
t.Fatal(err)
}
h := alpmHandle
root, err := h.Root()
expect(t, "RootDir", "/", root, err)
cache, err := h.CacheDirs()
expect(t, "CacheDir", []string{"/cachedir/", "/another/"}, cache.Slice(), err)
log, err := h.LogFile()
expect(t, "LogFile", "/logfile", log, err)
gpg, err := h.GPGDir()
expect(t, "GPGDir", "/gpgdir/", gpg, err)
hook, err := h.HookDirs()
expect(t, "HookDir", []string{"/usr/share/libalpm/hooks/", "/hookdir/"}, hook.Slice(), err)
delta, err := h.DeltaRatio()
expect(t, "UseDelta", 0.5, delta, err)
arch, err := h.Arch()
expect(t, "Architecture", "8086", arch, err)
ignorePkg, err := h.IgnorePkgs()
expect(t, "IgnorePkg", []string{"ignore", "this", "package"}, ignorePkg.Slice(), err)
ignoreGroup, err := h.IgnoreGroups()
expect(t, "IgnoreGroup", []string{"ignore", "this", "group"}, ignoreGroup.Slice(), err)
noUp, err := h.NoUpgrades()
expect(t, "NoUpgrade", []string{"noupgrade"}, noUp.Slice(), err)
noEx, err := h.NoExtracts()
expect(t, "NoExtract", []string{"noextract"}, noEx.Slice(), err)
check, err := h.CheckSpace()
expect(t, "CheckSpace", true, check, err)
}

dep.go

@ -53,6 +53,10 @@ func splitDep(dep string) (string, string, string) {
return match
})
if len(split) == 0 {
return "", "", ""
}
if len(split) == 1 {
return split[0], "", ""
}
@ -145,15 +149,15 @@ func splitDbFromName(pkg string) (string, string) {
return "", split[0]
}
func getBases(pkgs map[string]*rpc.Pkg) map[string][]*rpc.Pkg {
bases := make(map[string][]*rpc.Pkg)
func getBases(pkgs []*rpc.Pkg) []Base {
basesMap := make(map[string]Base)
for _, pkg := range pkgs {
_, ok := bases[pkg.PackageBase]
if !ok {
bases[pkg.PackageBase] = make([]*rpc.Pkg, 0)
}
bases[pkg.PackageBase] = append(bases[pkg.PackageBase], pkg)
basesMap[pkg.PackageBase] = append(basesMap[pkg.PackageBase], pkg)
}
bases := make([]Base, 0, len(basesMap))
for _, base := range basesMap {
bases = append(bases, base)
}
return bases
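A minimal sketch of the new grouping, assuming only the Name and PackageBase fields of rpc.Pkg; the package names are made up:
func exampleGetBases() {
	pkgs := []*rpc.Pkg{
		{Name: "yay", PackageBase: "yay"},
		{Name: "libc++", PackageBase: "libc++"},
		{Name: "libc++abi", PackageBase: "libc++"},
	}
	// libc++ and libc++abi share a pkgbase, so they end up in one Base.
	for _, base := range getBases(pkgs) {
		fmt.Printf("%s -> %d split package(s)\n", base.Pkgbase(), len(base))
	}
}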

View file

@ -6,7 +6,6 @@ import (
"sync"
alpm "github.com/jguer/go-alpm"
// gopkg "github.com/mikkeloscar/gopkgbuild"
)
func (dp *depPool) checkInnerConflict(name string, conflict string, conflicts mapStringSet) {
@ -124,7 +123,7 @@ func (dp *depPool) checkReverseConflicts(conflicts mapStringSet) {
})
}
func (dp *depPool) CheckConflicts() error {
func (dp *depPool) CheckConflicts() (mapStringSet, error) {
var wg sync.WaitGroup
innerConflicts := make(mapStringSet)
conflicts := make(mapStringSet)
@ -159,12 +158,17 @@ func (dp *depPool) CheckConflicts() error {
fmt.Println(str)
}
return fmt.Errorf("Unresolvable package conflicts, aborting")
return nil, fmt.Errorf("Unresolvable package conflicts, aborting")
}
if len(conflicts) != 0 {
fmt.Println()
fmt.Println(bold(red(arrow)), bold("Package conflicts found:"))
if !config.UseAsk {
fmt.Println(bold(red(arrow)), bold("You will have to confirm these when installing"))
}
for name, pkgs := range conflicts {
str := red(bold(smallArrow)) + " Installing " + cyan(name) + " will remove:"
for pkg := range pkgs {
@ -176,9 +180,13 @@ func (dp *depPool) CheckConflicts() error {
}
fmt.Println()
if config.NoConfirm && !config.UseAsk {
return nil, fmt.Errorf("Package conflicts can not be resolved with noconfirm, aborting")
}
}
return nil
return conflicts, nil
}
type missing struct {

View file

@ -5,19 +5,31 @@ import (
rpc "github.com/mikkeloscar/aur"
)
type Base []*rpc.Pkg
func (b Base) Pkgbase() string {
return b[0].PackageBase
}
func (b Base) Version() string {
return b[0].Version
}
func (b Base) URLPath() string {
return b[0].URLPath
}
type depOrder struct {
Aur []*rpc.Pkg
Aur []Base
Repo []*alpm.Package
Runtime stringSet
Bases map[string][]*rpc.Pkg
}
func makeDepOrder() *depOrder {
return &depOrder{
make([]*rpc.Pkg, 0),
make([]Base, 0),
make([]*alpm.Package, 0),
make(stringSet),
make(map[string][]*rpc.Pkg),
}
}
@ -65,11 +77,14 @@ func (do *depOrder) orderPkgAur(pkg *rpc.Pkg, dp *depPool, runtime bool) {
}
}
if _, ok := do.Bases[pkg.PackageBase]; !ok {
do.Aur = append(do.Aur, pkg)
do.Bases[pkg.PackageBase] = make([]*rpc.Pkg, 0)
for i, base := range do.Aur {
if base.Pkgbase() == pkg.PackageBase {
do.Aur[i] = append(base, pkg)
return
}
}
do.Bases[pkg.PackageBase] = append(do.Bases[pkg.PackageBase], pkg)
do.Aur = append(do.Aur, Base{pkg})
}
func (do *depOrder) orderPkgRepo(pkg *alpm.Package, dp *depPool, runtime bool) {
@ -92,7 +107,7 @@ func (do *depOrder) orderPkgRepo(pkg *alpm.Package, dp *depPool, runtime bool) {
func (do *depOrder) HasMake() bool {
lenAur := 0
for _, base := range do.Bases {
for _, base := range do.Aur {
lenAur += len(base)
}
@ -102,7 +117,7 @@ func (do *depOrder) HasMake() bool {
func (do *depOrder) getMake() []string {
makeOnly := make([]string, 0, len(do.Aur)+len(do.Repo)-len(do.Runtime))
for _, base := range do.Bases {
for _, base := range do.Aur {
for _, pkg := range base {
if !do.Runtime.get(pkg.Name) {
makeOnly = append(makeOnly, pkg.Name)

View file

@ -301,14 +301,12 @@ func (dp *depPool) resolveAURPackages(pkgs stringSet, explicit bool) error {
continue
}
//has satisfier installed: skip
_, isInstalled := dp.LocalDb.PkgCache().FindSatisfier(dep)
if isInstalled == nil {
_, isInstalled := dp.LocalDb.PkgCache().FindSatisfier(dep) //has satisfier installed: skip
repoPkg, inRepos := dp.SyncDb.FindSatisfier(dep) //has satisfier in repo: fetch it
if isInstalled == nil && (config.ReBuild != "tree" || inRepos == nil) {
continue
}
//has satisfier in repo: fetch it
repoPkg, inRepos := dp.SyncDb.FindSatisfier(dep)
if inRepos == nil {
dp.ResolveRepoDependency(repoPkg)
continue
@ -321,7 +319,6 @@ func (dp *depPool) resolveAURPackages(pkgs stringSet, explicit bool) error {
}
err = dp.resolveAURPackages(newAURPackages, false)
return err
}

1042
doc/yay.8

File diff suppressed because it is too large

View file

@ -8,6 +8,9 @@ import (
"os/exec"
"path/filepath"
"strings"
"sync"
alpm "github.com/jguer/go-alpm"
)
// Decide what download method to use:
@ -45,19 +48,14 @@ func downloadFile(path string, url string) (err error) {
}
func gitHasDiff(path string, name string) (bool, error) {
stdout, stderr, err := passToGitCapture(filepath.Join(path, name), "rev-parse", "HEAD")
stdout, stderr, err := capture(passToGit(filepath.Join(path, name), "rev-parse", "HEAD", "HEAD@{upstream}"))
if err != nil {
return false, fmt.Errorf("%s%s", stderr, err)
}
head := strings.TrimSpace(stdout)
stdout, stderr, err = passToGitCapture(filepath.Join(path, name), "rev-parse", "HEAD@{upstream}")
if err != nil {
return false, fmt.Errorf("%s%s", stderr, err)
}
upstream := strings.TrimSpace(stdout)
lines := strings.Split(stdout, "\n")
head := lines[0]
upstream := lines[1]
return head != upstream, nil
}
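A small usage sketch; the build directory and pkgbase are hypothetical, and a true result only means the local HEAD and its fetched upstream now point at different commits:
func exampleGitHasDiff() {
	changed, err := gitHasDiff("/home/user/.cache/yay", "yay-git")
	if err != nil {
		fmt.Println(err)
		return
	}
	if changed {
		fmt.Println("yay-git: upstream differs from the local checkout")
	}
}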
@ -65,9 +63,11 @@ func gitHasDiff(path string, name string) (bool, error) {
func gitDownload(url string, path string, name string) (bool, error) {
_, err := os.Stat(filepath.Join(path, name, ".git"))
if os.IsNotExist(err) {
err = passToGit(path, "clone", url, name)
cmd := passToGit(path, "clone", "--no-progress", url, name)
cmd.Env = append(os.Environ(), "GIT_TERMINAL_PROMPT=0")
_, stderr, err := capture(cmd)
if err != nil {
return false, fmt.Errorf("error cloning %s", name)
return false, fmt.Errorf("error cloning %s: %s", name, stderr)
}
return true, nil
@ -75,39 +75,41 @@ func gitDownload(url string, path string, name string) (bool, error) {
return false, fmt.Errorf("error reading %s", filepath.Join(path, name, ".git"))
}
err = passToGit(filepath.Join(path, name), "fetch")
cmd := passToGit(filepath.Join(path, name), "fetch")
cmd.Env = append(os.Environ(), "GIT_TERMINAL_PROMPT=0")
_, stderr, err := capture(cmd)
if err != nil {
return false, fmt.Errorf("error fetching %s", name)
return false, fmt.Errorf("error fetching %s: %s", name, stderr)
}
return false, nil
}
func gitMerge(url string, path string, name string) error {
err := passToGit(filepath.Join(path, name), "reset", "--hard", "HEAD")
func gitMerge(path string, name string) error {
_, stderr, err := capture(passToGit(filepath.Join(path, name), "reset", "--hard", "HEAD"))
if err != nil {
return fmt.Errorf("error resetting %s", name)
return fmt.Errorf("error resetting %s: %s", name, stderr)
}
err = passToGit(filepath.Join(path, name), "merge", "--no-edit", "--ff")
_, stderr, err = capture(passToGit(filepath.Join(path, name), "merge", "--no-edit", "--ff"))
if err != nil {
return fmt.Errorf("error merging %s", name)
return fmt.Errorf("error merging %s: %s", name, stderr)
}
return nil
}
func gitDiff(path string, name string) error {
err := passToGit(filepath.Join(path, name), "diff", "HEAD..HEAD@{upstream}")
err := show(passToGit(filepath.Join(path, name), "diff", "HEAD..HEAD@{upstream}"))
return err
}
// downloadAndUnpack downloads a tarball from url and extracts it to path.
func downloadAndUnpack(url string, path string) (err error) {
err = os.MkdirAll(path, 0755)
func downloadAndUnpack(url string, path string) error {
err := os.MkdirAll(path, 0755)
if err != nil {
return
return err
}
fileName := filepath.Base(url)
@ -117,15 +119,15 @@ func downloadAndUnpack(url string, path string) (err error) {
err = downloadFile(tarLocation, url)
if err != nil {
return
return err
}
err = exec.Command(config.TarBin, "-xf", tarLocation, "-C", path).Run()
_, stderr, err := capture(exec.Command(config.TarBin, "-xf", tarLocation, "-C", path))
if err != nil {
return
return fmt.Errorf("%s", stderr)
}
return
return nil
}
func getPkgbuilds(pkgs []string) error {
@ -136,9 +138,18 @@ func getPkgbuilds(pkgs []string) error {
}
pkgs = removeInvalidTargets(pkgs)
aur, repo, err := packageSlices(pkgs)
for n := range aur {
_, pkg := splitDbFromName(aur[n])
aur[n] = pkg
}
info, err := aurInfoPrint(aur)
if err != nil {
return err
}
if len(repo) > 0 {
missing, err = getPkgbuildsfromABS(repo, wd)
if err != nil {
@ -147,11 +158,33 @@ func getPkgbuilds(pkgs []string) error {
}
if len(aur) > 0 {
_missing, err := getPkgbuildsfromAUR(aur, wd)
if err != nil {
allBases := getBases(info)
bases := make([]Base, 0)
for _, base := range allBases {
name := base.Pkgbase()
_, err = os.Stat(filepath.Join(wd, name))
if err != nil && !os.IsNotExist(err) {
fmt.Println(bold(red(smallArrow)), err)
continue
} else if os.IsNotExist(err) || cmdArgs.existsArg("f", "force") || shouldUseGit(filepath.Join(wd, name)) {
if err = os.RemoveAll(filepath.Join(wd, name)); err != nil {
fmt.Println(bold(red(smallArrow)), err)
continue
}
} else {
fmt.Printf("%s %s %s\n", yellow(smallArrow), cyan(name), "already downloaded -- use -f to overwrite")
continue
}
bases = append(bases, base)
}
if _, err = downloadPkgbuilds(bases, nil, wd); err != nil {
return err
}
missing = missing || _missing
missing = missing || len(aur) != len(info)
}
if missing {
@ -162,107 +195,108 @@ func getPkgbuilds(pkgs []string) error {
}
// getPkgbuildsfromABS downloads PKGBUILDs from the ABS.
func getPkgbuildsfromABS(pkgs []string, path string) (missing bool, err error) {
func getPkgbuildsfromABS(pkgs []string, path string) (bool, error) {
var wg sync.WaitGroup
var mux sync.Mutex
var errs MultiError
names := make(map[string]string)
missing := make([]string, 0)
downloaded := 0
dbList, err := alpmHandle.SyncDbs()
if err != nil {
return
return false, err
}
nextPkg:
for _, pkgN := range pkgs {
var pkg *alpm.Package
var err error
var url string
pkgDb, name := splitDbFromName(pkgN)
for _, db := range dbList.Slice() {
if pkgDb != "" && db.Name() != pkgDb {
continue
if pkgDb != "" {
if db, err := alpmHandle.SyncDbByName(pkgDb); err == nil {
pkg, err = db.PkgByName(name)
}
pkg, err := db.PkgByName(name)
if err == nil {
var url string
name := pkg.Base()
if name == "" {
name = pkg.Name()
} else {
dbList.ForEach(func(db alpm.Db) error {
if pkg, err = db.PkgByName(name); err == nil {
return fmt.Errorf("")
}
if _, err := os.Stat(filepath.Join(path, name)); err == nil {
fmt.Println(bold(red(arrow)), bold(cyan(name)), "directory already exists")
continue nextPkg
}
switch db.Name() {
case "core", "extra":
url = "https://git.archlinux.org/svntogit/packages.git/snapshot/packages/" + name + ".tar.gz"
case "community", "multilib":
url = "https://git.archlinux.org/svntogit/community.git/snapshot/packages/" + name + ".tar.gz"
default:
fmt.Println(pkgN, "not in standard repositories")
continue nextPkg
}
errD := downloadAndUnpack(url, cacheHome)
if errD != nil {
fmt.Println(bold(red(arrow)), bold(cyan(pkg.Name())), bold(red(errD.Error())))
}
errD = exec.Command("mv", filepath.Join(cacheHome, "packages", name, "trunk"), filepath.Join(path, name)).Run()
if errD != nil {
fmt.Println(bold(red(arrow)), bold(cyan(pkg.Name())), bold(red(errD.Error())))
} else {
fmt.Println(bold(yellow(arrow)), "Downloaded", cyan(pkg.Name()), "from ABS")
}
continue nextPkg
}
return nil
})
}
fmt.Println(pkgN, "could not find package in database")
missing = true
}
if _, err := os.Stat(filepath.Join(cacheHome, "packages")); err == nil {
os.RemoveAll(filepath.Join(cacheHome, "packages"))
}
return
}
// GetPkgbuild downloads pkgbuild from the AUR.
func getPkgbuildsfromAUR(pkgs []string, dir string) (bool, error) {
missing := false
strippedPkgs := make([]string, 0)
for _, pkg := range pkgs {
_, name := splitDbFromName(pkg)
strippedPkgs = append(strippedPkgs, name)
}
aq, err := aurInfoPrint(strippedPkgs)
if err != nil {
return missing, err
}
for _, pkg := range aq {
if _, err := os.Stat(filepath.Join(dir, pkg.PackageBase)); err == nil {
fmt.Println(bold(red(arrow)), bold(cyan(pkg.Name)), "directory already exists")
if pkg == nil {
missing = append(missing, name)
continue
}
if shouldUseGit(filepath.Join(dir, pkg.PackageBase)) {
_, err = gitDownload(baseURL+"/"+pkg.PackageBase+".git", dir, pkg.PackageBase)
} else {
err = downloadAndUnpack(baseURL+aq[0].URLPath, dir)
name = pkg.Base()
if name == "" {
name = pkg.Name()
}
switch pkg.DB().Name() {
case "core", "extra", "testing":
url = "https://git.archlinux.org/svntogit/packages.git/snapshot/packages/" + name + ".tar.gz"
case "community", "multilib", "community-testing", "multilib-testing":
url = "https://git.archlinux.org/svntogit/community.git/snapshot/packages/" + name + ".tar.gz"
default:
missing = append(missing, name)
continue
}
_, err = os.Stat(filepath.Join(path, name))
if err != nil && !os.IsNotExist(err) {
fmt.Println(bold(red(smallArrow)), err)
continue
} else if os.IsNotExist(err) || cmdArgs.existsArg("f", "force") {
if err = os.RemoveAll(filepath.Join(path, name)); err != nil {
fmt.Println(bold(red(smallArrow)), err)
continue
}
} else {
fmt.Printf("%s %s %s\n", yellow(smallArrow), cyan(name), "already downloaded -- use -f to overwrite")
continue
}
names[name] = url
}
if len(missing) != 0 {
fmt.Println(yellow(bold(smallArrow)), "Missing ABS packages: ", cyan(strings.Join(missing, " ")))
}
download := func(pkg string, url string) {
defer wg.Done()
if err := downloadAndUnpack(url, cacheHome); err != nil {
errs.Add(fmt.Errorf("%s Failed to get pkgbuild: %s: %s", bold(red(arrow)), bold(cyan(pkg)), bold(red(err.Error()))))
return
}
_, stderr, err := capture(exec.Command("mv", filepath.Join(cacheHome, "packages", pkg, "trunk"), filepath.Join(path, pkg)))
mux.Lock()
downloaded++
if err != nil {
fmt.Println(err)
errs.Add(fmt.Errorf("%s Failed to move %s: %s", bold(red(arrow)), bold(cyan(pkg)), bold(red(string(stderr)))))
} else {
fmt.Println(bold(yellow(arrow)), "Downloaded", cyan(pkg.PackageBase), "from AUR")
fmt.Printf(bold(cyan("::"))+" Downloaded PKGBUILD from ABS (%d/%d): %s\n", downloaded, len(names), cyan(pkg))
}
mux.Unlock()
}
count := 0
for name, url := range names {
wg.Add(1)
go download(name, url)
count++
if count%25 == 0 {
wg.Wait()
}
}
if len(aq) != len(pkgs) {
missing = true
}
return missing, err
wg.Wait()
errs.Add(os.RemoveAll(filepath.Join(cacheHome, "packages")))
return len(missing) != 0, errs.Return()
}

123
exec.go Normal file
View file

@ -0,0 +1,123 @@
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
func show(cmd *exec.Cmd) error {
cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("")
}
return nil
}
func capture(cmd *exec.Cmd) (string, string, error) {
var outbuf, errbuf bytes.Buffer
cmd.Stdout = &outbuf
cmd.Stderr = &errbuf
err := cmd.Run()
stdout := strings.TrimSpace(outbuf.String())
stderr := strings.TrimSpace(errbuf.String())
return stdout, stderr, err
}
func sudoLoopBackground() {
updateSudo()
go sudoLoop()
}
func sudoLoop() {
for {
updateSudo()
time.Sleep(298 * time.Second)
}
}
func updateSudo() {
for {
err := show(exec.Command("sudo", "-v"))
if err != nil {
fmt.Println(err)
} else {
break
}
}
}
// waitLock blocks yay, polling db.lck until the file no longer exists
func waitLock() {
if _, err := os.Stat(filepath.Join(pacmanConf.DBPath, "db.lck")); err != nil {
return
}
fmt.Print(bold(yellow(smallArrow)), " db.lck is present. Waiting...")
for {
time.Sleep(3 * time.Second)
if _, err := os.Stat(filepath.Join(pacmanConf.DBPath, "db.lck")); err != nil {
fmt.Println()
return
}
}
}
func passToPacman(args *arguments) *exec.Cmd {
argArr := make([]string, 0)
if args.needRoot() {
argArr = append(argArr, "sudo")
}
argArr = append(argArr, config.PacmanBin)
argArr = append(argArr, cmdArgs.formatGlobals()...)
argArr = append(argArr, args.formatArgs()...)
if config.NoConfirm {
argArr = append(argArr, "--noconfirm")
}
argArr = append(argArr, "--config", config.PacmanConf)
argArr = append(argArr, "--")
argArr = append(argArr, args.targets...)
if args.needRoot() {
waitLock()
}
return exec.Command(argArr[0], argArr[1:]...)
}
func passToMakepkg(dir string, args ...string) *exec.Cmd {
if config.NoConfirm {
args = append(args)
}
mflags := strings.Fields(config.MFlags)
args = append(args, mflags...)
if config.MakepkgConf != "" {
args = append(args, "--config", config.MakepkgConf)
}
cmd := exec.Command(config.MakepkgBin, args...)
cmd.Dir = dir
return cmd
}
func passToGit(dir string, _args ...string) *exec.Cmd {
gitflags := strings.Fields(config.GitFlags)
args := []string{"-C", dir}
args = append(args, gitflags...)
args = append(args, _args...)
cmd := exec.Command(config.GitBin, args...)
return cmd
}
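A short sketch of how the two helpers combine with passToGit; the build directory is made up. capture is for output yay itself needs to inspect, while show streams straight to the user's terminal:
func exampleShowCapture() {
	rev, stderr, err := capture(passToGit("/tmp/yay-build/yay", "rev-parse", "HEAD"))
	if err != nil {
		fmt.Println(stderr)
		return
	}
	fmt.Println("HEAD is at", rev)
	// Interactive or long output goes through show instead.
	_ = show(passToGit("/tmp/yay-build/yay", "log", "-1", "--stat"))
}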

File diff suppressed because it is too large

39
keys.go
View file

@ -7,13 +7,12 @@ import (
"os/exec"
"strings"
rpc "github.com/mikkeloscar/aur"
gopkg "github.com/mikkeloscar/gopkgbuild"
gosrc "github.com/Morganamilo/go-srcinfo"
)
// pgpKeySet maps a PGP key with a list of PKGBUILDs that require it.
// This is similar to stringSet, used throughout the code.
type pgpKeySet map[string][]*rpc.Pkg
type pgpKeySet map[string][]Base
func (set pgpKeySet) toSlice() []string {
slice := make([]string, 0, len(set))
@ -23,14 +22,11 @@ func (set pgpKeySet) toSlice() []string {
return slice
}
func (set pgpKeySet) set(key string, p *rpc.Pkg) {
func (set pgpKeySet) set(key string, p Base) {
// Using ToUpper to make sure keys with a different case will be
// considered the same.
upperKey := strings.ToUpper(key)
if _, exists := set[upperKey]; !exists {
set[upperKey] = []*rpc.Pkg{}
}
set[key] = append(set[key], p)
set[upperKey] = append(set[upperKey], p)
}
func (set pgpKeySet) get(key string) bool {
@ -41,28 +37,29 @@ func (set pgpKeySet) get(key string) bool {
// checkPgpKeys iterates through the keys listed in the PKGBUILDs and if needed,
// asks the user whether yay should try to import them.
func checkPgpKeys(pkgs []*rpc.Pkg, bases map[string][]*rpc.Pkg, srcinfos map[string]*gopkg.PKGBUILD) error {
func checkPgpKeys(bases []Base, srcinfos map[string]*gosrc.Srcinfo) error {
// Let's check the keys individually, and then we can offer to import
// the problematic ones.
problematic := make(pgpKeySet)
args := append(strings.Fields(config.GpgFlags), "--list-keys")
// Mapping all the keys.
for _, pkg := range pkgs {
srcinfo := srcinfos[pkg.PackageBase]
for _, base := range bases {
pkg := base.Pkgbase()
srcinfo := srcinfos[pkg]
for _, key := range srcinfo.Validpgpkeys {
for _, key := range srcinfo.ValidPGPKeys {
// If key already marked as problematic, indicate the current
// PKGBUILD requires it.
if problematic.get(key) {
problematic.set(key, pkg)
problematic.set(key, base)
continue
}
cmd := exec.Command(config.GpgBin, append(args, key)...)
err := cmd.Run()
if err != nil {
problematic.set(key, pkg)
problematic.set(key, base)
}
}
}
@ -72,7 +69,7 @@ func checkPgpKeys(pkgs []*rpc.Pkg, bases map[string][]*rpc.Pkg, srcinfos map[str
return nil
}
str, err := formatKeysToImport(problematic, bases)
str, err := formatKeysToImport(problematic)
if err != nil {
return err
}
@ -80,7 +77,7 @@ func checkPgpKeys(pkgs []*rpc.Pkg, bases map[string][]*rpc.Pkg, srcinfos map[str
fmt.Println()
fmt.Println(str)
if continueTask(bold(green("Import?")), "nN") {
if continueTask(bold(green("Import?")), true) {
return importKeys(problematic.toSlice())
}
@ -104,7 +101,7 @@ func importKeys(keys []string) error {
// formatKeysToImport receives a set of keys and returns a string containing the
// question asking whether the user wants to import the problematic keys.
func formatKeysToImport(keys pgpKeySet, bases map[string][]*rpc.Pkg) (string, error) {
func formatKeysToImport(keys pgpKeySet) (string, error) {
if len(keys) == 0 {
return "", fmt.Errorf("%s No keys to import", bold(red(arrow+" Error:")))
}
@ -112,12 +109,12 @@ func formatKeysToImport(keys pgpKeySet, bases map[string][]*rpc.Pkg) (string, er
var buffer bytes.Buffer
buffer.WriteString(bold(green(arrow)))
buffer.WriteString(bold(green(" PGP keys need importing:")))
for key, pkgs := range keys {
for key, bases := range keys {
pkglist := ""
for _, pkg := range pkgs {
pkglist += formatPkgbase(pkg, bases) + " "
for _, base := range bases {
pkglist += base.String() + " "
}
pkglist = strings.TrimRight(pkglist, " ")
pkglist = strings.TrimRight(pkglist, " ")
buffer.WriteString(fmt.Sprintf("\n%s %s, required by: %s", yellow(bold(smallArrow)), cyan(key), cyan(pkglist)))
}
return buffer.String(), nil
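A minimal sketch of the case-insensitive keying pgpKeySet.set aims for; the fingerprint and packages are made up:
func examplePgpKeySet() {
	problematic := make(pgpKeySet)
	problematic.set("0123456789abcdef0123456789abcdef01234567", Base{{Name: "dummy-1", PackageBase: "dummy-1"}})
	problematic.set("0123456789ABCDEF0123456789ABCDEF01234567", Base{{Name: "dummy-2", PackageBase: "dummy-2"}})
	// Both bases are recorded under the same upper-cased key.
	fmt.Println(len(problematic.toSlice())) // 1
}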

View file

@ -11,8 +11,8 @@ import (
"regexp"
"testing"
gosrc "github.com/Morganamilo/go-srcinfo"
rpc "github.com/mikkeloscar/aur"
gopkg "github.com/mikkeloscar/gopkgbuild"
)
const (
@ -124,6 +124,14 @@ func TestImportKeys(t *testing.T) {
}
}
func makeSrcinfo(pkgbase string, pgpkeys ...string) *gosrc.Srcinfo {
srcinfo := gosrc.Srcinfo{}
srcinfo.Pkgbase = pkgbase
srcinfo.ValidPGPKeys = pgpkeys
return &srcinfo
}
func TestCheckPgpKeys(t *testing.T) {
keyringDir, err := ioutil.TempDir("/tmp", "yay-test-keyring")
if err != nil {
@ -138,34 +146,30 @@ func TestCheckPgpKeys(t *testing.T) {
defer server.Shutdown(context.TODO())
casetests := []struct {
pkgs []*rpc.Pkg
srcinfos map[string]*gopkg.PKGBUILD
bases map[string][]*rpc.Pkg
pkgs Base
srcinfos map[string]*gosrc.Srcinfo
wantError bool
}{
// cower: single package, one valid key not yet in the keyring.
// 487EACC08557AD082088DABA1EB2638FF56C0C53: Dave Reisner.
{
pkgs: []*rpc.Pkg{newPkg("cower")},
srcinfos: map[string]*gopkg.PKGBUILD{"cower": {Pkgbase: "cower", Validpgpkeys: []string{"487EACC08557AD082088DABA1EB2638FF56C0C53"}}},
bases: map[string][]*rpc.Pkg{"cower": {newPkg("cower")}},
pkgs: Base{newPkg("cower")},
srcinfos: map[string]*gosrc.Srcinfo{"cower": makeSrcinfo("cower", "487EACC08557AD082088DABA1EB2638FF56C0C53")},
wantError: false,
},
// libc++: single package, two valid keys not yet in the keyring.
// 11E521D646982372EB577A1F8F0871F202119294: Tom Stellard.
// B6C8F98282B944E3B0D5C2530FC3042E345AD05D: Hans Wennborg.
{
pkgs: []*rpc.Pkg{newPkg("libc++")},
srcinfos: map[string]*gopkg.PKGBUILD{"libc++": {Pkgbase: "libc++", Validpgpkeys: []string{"11E521D646982372EB577A1F8F0871F202119294", "B6C8F98282B944E3B0D5C2530FC3042E345AD05D"}}},
bases: map[string][]*rpc.Pkg{"libc++": {newPkg("libc++")}},
pkgs: Base{newPkg("libc++")},
srcinfos: map[string]*gosrc.Srcinfo{"libc++": makeSrcinfo("libc++", "11E521D646982372EB577A1F8F0871F202119294", "B6C8F98282B944E3B0D5C2530FC3042E345AD05D")},
wantError: false,
},
// Two dummy packages requiring the same key.
// ABAF11C65A2970B130ABE3C479BE3E4300411886: Linus Torvalds.
{
pkgs: []*rpc.Pkg{newPkg("dummy-1"), newPkg("dummy-2")},
srcinfos: map[string]*gopkg.PKGBUILD{"dummy-1": {Pkgbase: "dummy-1", Validpgpkeys: []string{"ABAF11C65A2970B130ABE3C479BE3E4300411886"}}, "dummy-2": {Pkgbase: "dummy-2", Validpgpkeys: []string{"ABAF11C65A2970B130ABE3C479BE3E4300411886"}}},
bases: map[string][]*rpc.Pkg{"dummy-1": {newPkg("dummy-1")}, "dummy-2": {newPkg("dummy-2")}},
pkgs: Base{newPkg("dummy-1"), newPkg("dummy-2")},
srcinfos: map[string]*gosrc.Srcinfo{"dummy-1": makeSrcinfo("dummy-1", "ABAF11C65A2970B130ABE3C479BE3E4300411886"), "dummy-2": makeSrcinfo("dummy-2", "ABAF11C65A2970B130ABE3C479BE3E4300411886")},
wantError: false,
},
// dummy package: single package, two valid keys, one of them already
@ -173,37 +177,33 @@ func TestCheckPgpKeys(t *testing.T) {
// 11E521D646982372EB577A1F8F0871F202119294: Tom Stellard.
// C52048C0C0748FEE227D47A2702353E0F7E48EDB: Thomas Dickey.
{
pkgs: []*rpc.Pkg{newPkg("dummy-3")},
srcinfos: map[string]*gopkg.PKGBUILD{"dummy-3": {Pkgbase: "dummy-3", Validpgpkeys: []string{"11E521D646982372EB577A1F8F0871F202119294", "C52048C0C0748FEE227D47A2702353E0F7E48EDB"}}},
bases: map[string][]*rpc.Pkg{"dummy-3": {newPkg("dummy-3")}},
pkgs: Base{newPkg("dummy-3")},
srcinfos: map[string]*gosrc.Srcinfo{"dummy-3": makeSrcinfo("dummy-3", "11E521D646982372EB577A1F8F0871F202119294", "C52048C0C0748FEE227D47A2702353E0F7E48EDB")},
wantError: false,
},
// Two dummy packages with existing keys.
{
pkgs: []*rpc.Pkg{newPkg("dummy-4"), newPkg("dummy-5")},
srcinfos: map[string]*gopkg.PKGBUILD{"dummy-4": {Pkgbase: "dummy-4", Validpgpkeys: []string{"11E521D646982372EB577A1F8F0871F202119294"}}, "dummy-5": {Pkgbase: "dummy-5", Validpgpkeys: []string{"C52048C0C0748FEE227D47A2702353E0F7E48EDB"}}},
bases: map[string][]*rpc.Pkg{"dummy-4": {newPkg("dummy-4")}, "dummy-5": {newPkg("dummy-5")}},
pkgs: Base{newPkg("dummy-4"), newPkg("dummy-5")},
srcinfos: map[string]*gosrc.Srcinfo{"dummy-4": makeSrcinfo("dummy-4", "11E521D646982372EB577A1F8F0871F202119294"), "dummy-5": makeSrcinfo("dummy-5", "C52048C0C0748FEE227D47A2702353E0F7E48EDB")},
wantError: false,
},
// Dummy package with invalid key, should fail.
{
pkgs: []*rpc.Pkg{newPkg("dummy-7")},
srcinfos: map[string]*gopkg.PKGBUILD{"dummy-7": {Pkgbase: "dummy-7", Validpgpkeys: []string{"THIS-SHOULD-FAIL"}}},
bases: map[string][]*rpc.Pkg{"dummy-7": {newPkg("dummy-7")}},
pkgs: Base{newPkg("dummy-7")},
srcinfos: map[string]*gosrc.Srcinfo{"dummy-7": makeSrcinfo("dummy-7", "THIS-SHOULD-FAIL")},
wantError: true,
},
// Dummy package with both an invalid an another valid key, should fail.
// A314827C4E4250A204CE6E13284FC34C8E4B1A25: Thomas Bächler.
{
pkgs: []*rpc.Pkg{newPkg("dummy-8")},
srcinfos: map[string]*gopkg.PKGBUILD{"dummy-8": {Pkgbase: "dummy-8", Validpgpkeys: []string{"A314827C4E4250A204CE6E13284FC34C8E4B1A25", "THIS-SHOULD-FAIL"}}},
bases: map[string][]*rpc.Pkg{"dummy-8": {newPkg("dummy-8")}},
pkgs: Base{newPkg("dummy-8")},
srcinfos: map[string]*gosrc.Srcinfo{"dummy-8": makeSrcinfo("dummy-8", "A314827C4E4250A204CE6E13284FC34C8E4B1A25", "THIS-SHOULD-FAIL")},
wantError: true,
},
}
for _, tt := range casetests {
err := checkPgpKeys(tt.pkgs, tt.bases, tt.srcinfos)
err := checkPgpKeys([]Base{tt.pkgs}, tt.srcinfos)
if !tt.wantError {
if err != nil {
t.Fatalf("Got error %q, want no error", err)

317
main.go
View file

@ -7,34 +7,23 @@ import (
"path/filepath"
"strings"
pacmanconf "github.com/Morganamilo/go-pacmanconf"
alpm "github.com/jguer/go-alpm"
)
func setPaths() error {
if _configHome, set := os.LookupEnv("XDG_CONFIG_HOME"); set {
if _configHome == "" {
return fmt.Errorf("XDG_CONFIG_HOME set but empty")
}
configHome = filepath.Join(_configHome, "yay")
} else if _configHome, set := os.LookupEnv("HOME"); set {
if _configHome == "" {
return fmt.Errorf("HOME set but empty")
}
configHome = filepath.Join(_configHome, ".config/yay")
if configHome = os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
configHome = filepath.Join(configHome, "yay")
} else if configHome = os.Getenv("HOME"); configHome != "" {
configHome = filepath.Join(configHome, ".config/yay")
} else {
return fmt.Errorf("XDG_CONFIG_HOME and HOME unset")
}
if _cacheHome, set := os.LookupEnv("XDG_CACHE_HOME"); set {
if _cacheHome == "" {
return fmt.Errorf("XDG_CACHE_HOME set but empty")
}
cacheHome = filepath.Join(_cacheHome, "yay")
} else if _cacheHome, set := os.LookupEnv("HOME"); set {
if _cacheHome == "" {
return fmt.Errorf("XDG_CACHE_HOME set but empty")
}
cacheHome = filepath.Join(_cacheHome, ".cache/yay")
if cacheHome = os.Getenv("XDG_CACHE_HOME"); cacheHome != "" {
cacheHome = filepath.Join(cacheHome, "yay")
} else if cacheHome = os.Getenv("HOME"); cacheHome != "" {
cacheHome = filepath.Join(cacheHome, ".cache/yay")
} else {
return fmt.Errorf("XDG_CACHE_HOME and HOME unset")
}
@ -45,207 +34,189 @@ func setPaths() error {
return nil
}
func initConfig() (err error) {
defaultSettings(&config)
func initConfig() error {
cfile, err := os.Open(configFile)
if !os.IsNotExist(err) && err != nil {
return fmt.Errorf("Failed to open config file '%s': %s", configFile, err)
}
if _, err = os.Stat(configFile); os.IsNotExist(err) {
err = os.MkdirAll(filepath.Dir(configFile), 0755)
if err != nil {
err = fmt.Errorf("Unable to create config directory:\n%s\n"+
"The error was:\n%s", filepath.Dir(configFile), err)
return
}
// Save the default config if nothing is found
config.saveConfig()
} else {
cfile, errf := os.OpenFile(configFile, os.O_RDWR|os.O_CREATE, 0644)
if errf != nil {
fmt.Printf("Error reading config: %s\n", err)
} else {
defer cfile.Close()
decoder := json.NewDecoder(cfile)
err = decoder.Decode(&config)
if err != nil {
fmt.Println("Loading default Settings.\nError reading config:",
err)
defaultSettings(&config)
}
if _, err = os.Stat(config.BuildDir); os.IsNotExist(err) {
err = os.MkdirAll(config.BuildDir, 0755)
if err != nil {
err = fmt.Errorf("Unable to create BuildDir directory:\n%s\n"+
"The error was:\n%s", config.BuildDir, err)
return
}
}
defer cfile.Close()
if !os.IsNotExist(err) {
decoder := json.NewDecoder(cfile)
if err = decoder.Decode(&config); err != nil {
return fmt.Errorf("Failed to read config '%s': %s", configFile, err)
}
}
return
return nil
}
func initVCS() (err error) {
if _, err = os.Stat(vcsFile); os.IsNotExist(err) {
err = os.MkdirAll(filepath.Dir(vcsFile), 0755)
if err != nil {
err = fmt.Errorf("Unable to create vcs directory:\n%s\n"+
"The error was:\n%s", filepath.Dir(configFile), err)
return
}
} else {
vfile, err := os.OpenFile(vcsFile, os.O_RDONLY|os.O_CREATE, 0644)
if err == nil {
defer vfile.Close()
decoder := json.NewDecoder(vfile)
_ = decoder.Decode(&savedInfo)
func initVCS() error {
vfile, err := os.Open(vcsFile)
if !os.IsNotExist(err) && err != nil {
return fmt.Errorf("Failed to open vcs file '%s': %s", vcsFile, err)
}
defer vfile.Close()
if !os.IsNotExist(err) {
decoder := json.NewDecoder(vfile)
if err = decoder.Decode(&savedInfo); err != nil {
return fmt.Errorf("Failed to read vcs '%s': %s", vcsFile, err)
}
}
return
return nil
}
func initAlpm() (err error) {
var value string
var exists bool
//var double bool
value, _, exists = cmdArgs.getArg("config")
if exists {
config.PacmanConf = value
func initHomeDirs() error {
if _, err := os.Stat(configHome); os.IsNotExist(err) {
if err = os.MkdirAll(configHome, 0755); err != nil {
return fmt.Errorf("Failed to create config directory '%s': %s", configHome, err)
}
} else if err != nil {
return err
}
alpmConf, err = readAlpmConfig(config.PacmanConf)
if _, err := os.Stat(cacheHome); os.IsNotExist(err) {
if err = os.MkdirAll(cacheHome, 0755); err != nil {
return fmt.Errorf("Failed to create cache directory '%s': %s", cacheHome, err)
}
} else if err != nil {
return err
}
return nil
}
func initBuildDir() error {
if _, err := os.Stat(config.BuildDir); os.IsNotExist(err) {
if err = os.MkdirAll(config.BuildDir, 0755); err != nil {
return fmt.Errorf("Failed to create BuildDir directory '%s': %s", config.BuildDir, err)
}
} else if err != nil {
return err
}
return nil
}
func initAlpm() error {
var err error
var stderr string
root := "/"
if value, _, exists := cmdArgs.getArg("root", "r"); exists {
root = value
}
pacmanConf, stderr, err = pacmanconf.PacmanConf("--config", config.PacmanConf, "--root", root)
if err != nil {
err = fmt.Errorf("Unable to read Pacman conf: %s", err)
return
return fmt.Errorf("%s", stderr)
}
value, _, exists = cmdArgs.getArg("dbpath", "b")
if exists {
alpmConf.DBPath = value
if value, _, exists := cmdArgs.getArg("dbpath", "b"); exists {
pacmanConf.DBPath = value
}
value, _, exists = cmdArgs.getArg("root", "r")
if exists {
alpmConf.RootDir = value
if value, _, exists := cmdArgs.getArg("arch"); exists {
pacmanConf.Architecture = value
}
value, _, exists = cmdArgs.getArg("arch")
if exists {
alpmConf.Architecture = value
if value, _, exists := cmdArgs.getArg("ignore"); exists {
pacmanConf.IgnorePkg = append(pacmanConf.IgnorePkg, strings.Split(value, ",")...)
}
value, _, exists = cmdArgs.getArg("ignore")
if exists {
alpmConf.IgnorePkg = append(alpmConf.IgnorePkg, strings.Split(value, ",")...)
}
value, _, exists = cmdArgs.getArg("ignoregroup")
if exists {
alpmConf.IgnoreGroup = append(alpmConf.IgnoreGroup, strings.Split(value, ",")...)
if value, _, exists := cmdArgs.getArg("ignoregroup"); exists {
pacmanConf.IgnoreGroup = append(pacmanConf.IgnoreGroup, strings.Split(value, ",")...)
}
//TODO
//current system does not allow duplicate arguments
//but pacman allows multiple cachedirs to be passed
//for now only handle one cache dir
value, _, exists = cmdArgs.getArg("cachdir")
if exists {
alpmConf.CacheDir = []string{value}
if value, _, exists := cmdArgs.getArg("cachdir"); exists {
pacmanConf.CacheDir = []string{value}
}
value, _, exists = cmdArgs.getArg("gpgdir")
if exists {
alpmConf.GPGDir = value
if value, _, exists := cmdArgs.getArg("gpgdir"); exists {
pacmanConf.GPGDir = value
}
alpmHandle, err = alpmConf.CreateHandle()
if err != nil {
err = fmt.Errorf("Unable to CreateHandle: %s", err)
return
if err = initAlpmHandle(); err != nil {
return err
}
value, _, _ = cmdArgs.getArg("color")
if value == "always" || value == "auto" {
if value, _, _ := cmdArgs.getArg("color"); value == "always" || value == "auto" {
useColor = true
} else if value == "never" {
useColor = false
} else {
useColor = alpmConf.Options&alpm.ConfColor > 0
useColor = pacmanConf.Color
}
return nil
}
func initAlpmHandle() error {
var err error
if alpmHandle != nil {
if err := alpmHandle.Release(); err != nil {
return err
}
}
if alpmHandle, err = alpm.Init(pacmanConf.RootDir, pacmanConf.DBPath); err != nil {
return fmt.Errorf("Unable to CreateHandle: %s", err)
}
if err = configureAlpm(pacmanConf); err != nil {
return err
}
alpmHandle.SetQuestionCallback(questionCallback)
alpmHandle.SetLogCallback(logCallback)
return nil
}
return
func exitOnError(err error) {
if err != nil {
if str := err.Error(); str != "" {
fmt.Println(str)
}
cleanup()
os.Exit(1)
}
}
func cleanup() int {
if alpmHandle != nil {
if err := alpmHandle.Release(); err != nil {
fmt.Println(err)
return 1
}
}
return 0
}
func main() {
var status int
var err error
if 0 == os.Geteuid() {
fmt.Println("Please avoid running yay as root/sudo.")
}
err = cmdArgs.parseCommandLine()
if err != nil {
fmt.Println(err)
status = 1
goto cleanup
exitOnError(setPaths())
config.defaultSettings()
exitOnError(initHomeDirs())
exitOnError(initConfig())
exitOnError(cmdArgs.parseCommandLine())
if shouldSaveConfig {
config.saveConfig()
}
err = setPaths()
if err != nil {
fmt.Println(err)
status = 1
goto cleanup
}
err = initConfig()
if err != nil {
fmt.Println(err)
status = 1
goto cleanup
}
err = initVCS()
if err != nil {
fmt.Println(err)
status = 1
goto cleanup
}
err = initAlpm()
if err != nil {
fmt.Println(err)
status = 1
goto cleanup
}
err = handleCmd()
if err != nil {
if err.Error() != "" {
fmt.Println(err)
}
status = 1
goto cleanup
}
cleanup:
//cleanup
//from here on out don't exit if an error occurs
//if we fail to save the configuration
//at least continue on and try clean up other parts
if alpmHandle != nil {
err = alpmHandle.Release()
if err != nil {
fmt.Println(err)
status = 1
}
}
os.Exit(status)
config.expandEnv()
exitOnError(initBuildDir())
exitOnError(initVCS())
exitOnError(initAlpm())
exitOnError(handleCmd())
os.Exit(cleanup())
}

479
parser.go
View file

@ -1,13 +1,16 @@
package main
import (
"bufio"
"bytes"
"fmt"
"html"
"io"
"os"
"strconv"
"strings"
"unicode"
rpc "github.com/mikkeloscar/aur"
)
// A basic set implementation for strings.
@ -121,14 +124,11 @@ func (parser *arguments) needRoot() bool {
return false
}
if parser.existsArg("p", "print") {
return false
}
switch parser.op {
case "V", "version":
return false
case "D", "database":
if parser.existsArg("k", "check") {
return false
}
return true
case "F", "files":
if parser.existsArg("y", "refresh") {
@ -136,6 +136,9 @@ func (parser *arguments) needRoot() bool {
}
return false
case "Q", "query":
if parser.existsArg("k", "check") {
return true
}
return false
case "R", "remove":
return true
@ -143,8 +146,8 @@ func (parser *arguments) needRoot() bool {
if parser.existsArg("y", "refresh") {
return true
}
if parser.existsArg("u", "sysupgrade") {
return true
if parser.existsArg("p", "print", "print-format") {
return false
}
if parser.existsArg("s", "search") {
return false
@ -152,22 +155,18 @@ func (parser *arguments) needRoot() bool {
if parser.existsArg("l", "list") {
return false
}
if parser.existsArg("g", "groups") {
return false
}
if parser.existsArg("i", "info") {
return false
}
if parser.existsArg("c", "clean") && mode == ModeAUR {
return false
}
return true
case "T", "deptest":
return false
case "U", "upgrade":
return true
// yay specific
case "Y", "yay":
return false
case "P", "print":
return false
case "G", "getpkgbuild":
return false
default:
return false
}
@ -184,6 +183,10 @@ func (parser *arguments) addOP(op string) (err error) {
}
func (parser *arguments) addParam(option string, arg string) (err error) {
if !isArg(option) {
return fmt.Errorf("invalid option '%s'", option)
}
if isOp(option) {
err = parser.addOP(option)
return
@ -343,143 +346,392 @@ func formatArg(arg string) string {
return arg
}
func isOp(op string) bool {
switch op {
case "V", "version":
return true
func isArg(arg string) bool {
switch arg {
case "-", "--":
case "D", "database":
return true
case "F", "files":
return true
case "Q", "query":
return true
case "R", "remove":
return true
case "S", "sync":
return true
case "T", "deptest":
return true
case "U", "upgrade":
return true
// yay specific
case "F", "files":
case "V", "version":
case "h", "help":
case "Y", "yay":
return true
case "P", "print":
return true
case "P", "show":
case "G", "getpkgbuild":
return true
case "b", "dbpath":
case "r", "root":
case "v", "verbose":
case "arch":
case "cachedir":
case "color":
case "config":
case "debug":
case "gpgdir":
case "hookdir":
case "logfile":
case "noconfirm":
case "confirm":
case "disabledownloadtimeout":
case "sysroot":
case "d", "nodeps":
case "assumeinstalled":
case "dbonly":
case "noprogressbar":
case "noscriptlet":
case "p":
case "printformat":
case "asdeps":
case "asexplicit":
case "ignore":
case "ignoregroup":
case "needed":
case "overwrite":
case "f", "force":
case "c", "changelog":
case "deps":
case "e", "explicit":
case "g", "groups":
case "i", "info":
case "k", "check":
case "l", "list":
case "m", "foreign":
case "n", "native":
case "o", "owns":
case "file":
case "q", "quiet":
case "s", "search":
case "t", "unrequired":
case "u", "upgrades":
case "cascade":
case "nosave":
case "recursive":
case "unneeded":
case "clean":
case "sysupgrade":
case "w", "downloadonly":
case "y", "refresh":
case "x", "regex":
case "machinereadable":
//yay options
case "aururl":
case "save":
case "afterclean":
case "noafterclean":
case "devel":
case "nodevel":
case "timeupdate":
case "notimeupdate":
case "topdown":
case "bottomup":
case "completioninterval":
case "sortby":
case "redownload":
case "redownloadall":
case "noredownload":
case "rebuild":
case "rebuildall":
case "rebuildtree":
case "norebuild":
case "answerclean":
case "noanswerclean":
case "answerdiff":
case "noanswerdiff":
case "answeredit":
case "noansweredit":
case "answerupgrade":
case "noanswerupgrade":
case "gitclone":
case "nogitclone":
case "gpgflags":
case "mflags":
case "gitflags":
case "builddir":
case "editor":
case "editorflags":
case "makepkg":
case "makepkgconf":
case "nomakepkgconf":
case "pacman":
case "tar":
case "git":
case "gpg":
case "requestsplitn":
case "sudoloop":
case "nosudoloop":
case "provides":
case "noprovides":
case "pgpfetch":
case "nopgpfetch":
case "upgrademenu":
case "noupgrademenu":
case "cleanmenu":
case "nocleanmenu":
case "diffmenu":
case "nodiffmenu":
case "editmenu":
case "noeditmenu":
case "useask":
case "nouseask":
case "combinedupgrade":
case "nocombinedupgrade":
case "a", "aur":
case "repo":
case "removemake":
case "noremovemake":
case "askremovemake":
case "complete":
case "stats":
case "news":
case "gendb":
case "currentconfig":
default:
return false
}
return true
}
func handleConfig(option, value string) bool {
switch option {
case "aururl":
config.AURURL = value
case "save":
shouldSaveConfig = true
case "afterclean":
config.CleanAfter = true
case "noafterclean":
config.CleanAfter = false
case "devel":
config.Devel = true
case "nodevel":
config.Devel = false
case "timeupdate":
config.TimeUpdate = true
case "notimeupdate":
config.TimeUpdate = false
case "topdown":
config.SortMode = TopDown
case "bottomup":
config.SortMode = BottomUp
case "completioninterval":
n, err := strconv.Atoi(value)
if err == nil {
config.CompletionInterval = n
}
case "sortby":
config.SortBy = value
case "noconfirm":
config.NoConfirm = true
case "config":
config.PacmanConf = value
case "redownload":
config.ReDownload = "yes"
case "redownloadall":
config.ReDownload = "all"
case "noredownload":
config.ReDownload = "no"
case "rebuild":
config.ReBuild = "yes"
case "rebuildall":
config.ReBuild = "all"
case "rebuildtree":
config.ReBuild = "tree"
case "norebuild":
config.ReBuild = "no"
case "answerclean":
config.AnswerClean = value
case "noanswerclean":
config.AnswerClean = ""
case "answerdiff":
config.AnswerDiff = value
case "noanswerdiff":
config.AnswerDiff = ""
case "answeredit":
config.AnswerEdit = value
case "noansweredit":
config.AnswerEdit = ""
case "answerupgrade":
config.AnswerUpgrade = value
case "noanswerupgrade":
config.AnswerUpgrade = ""
case "gitclone":
config.GitClone = true
case "nogitclone":
config.GitClone = false
case "gpgflags":
config.GpgFlags = value
case "mflags":
config.MFlags = value
case "gitflags":
config.GitFlags = value
case "builddir":
config.BuildDir = value
case "editor":
config.Editor = value
case "editorflags":
config.EditorFlags = value
case "makepkg":
config.MakepkgBin = value
case "makepkgconf":
config.MakepkgConf = value
case "nomakepkgconf":
config.MakepkgConf = ""
case "pacman":
config.PacmanBin = value
case "tar":
config.TarBin = value
case "git":
config.GitBin = value
case "gpg":
config.GpgBin = value
case "requestsplitn":
n, err := strconv.Atoi(value)
if err == nil && n > 0 {
config.RequestSplitN = n
}
case "sudoloop":
config.SudoLoop = true
case "nosudoloop":
config.SudoLoop = false
case "provides":
config.Provides = true
case "noprovides":
config.Provides = false
case "pgpfetch":
config.PGPFetch = true
case "nopgpfetch":
config.PGPFetch = false
case "upgrademenu":
config.UpgradeMenu = true
case "noupgrademenu":
config.UpgradeMenu = false
case "cleanmenu":
config.CleanMenu = true
case "nocleanmenu":
config.CleanMenu = false
case "diffmenu":
config.DiffMenu = true
case "nodiffmenu":
config.DiffMenu = false
case "editmenu":
config.EditMenu = true
case "noeditmenu":
config.EditMenu = false
case "useask":
config.UseAsk = true
case "nouseask":
config.UseAsk = false
case "combinedupgrade":
config.CombinedUpgrade = true
case "nocombinedupgrade":
config.CombinedUpgrade = false
case "a", "aur":
mode = ModeAUR
case "repo":
mode = ModeRepo
case "removemake":
config.RemoveMake = "yes"
case "noremovemake":
config.RemoveMake = "no"
case "askremovemake":
config.RemoveMake = "ask"
default:
return false
}
return true
}
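A small sketch of the intended contract: options yay understands are applied to config and reported as handled, so the caller can strip them, while anything else is left in place for pacman. The option names below are taken from the cases above:
func exampleHandleConfig() {
	if handleConfig("bottomup", "") {
		fmt.Println("sorting bottom up:", config.SortMode == BottomUp) // true
	}
	// Not a yay option, so it stays in the argument list for pacman.
	fmt.Println(handleConfig("sysupgrade", "")) // false
}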
func isOp(op string) bool {
switch op {
case "V", "version":
case "D", "database":
case "F", "files":
case "Q", "query":
case "R", "remove":
case "S", "sync":
case "T", "deptest":
case "U", "upgrade":
// yay specific
case "Y", "yay":
case "P", "show":
case "G", "getpkgbuild":
default:
return false
}
return true
}
func isGlobal(op string) bool {
switch op {
case "b", "dbpath":
return true
case "r", "root":
return true
case "v", "verbose":
return true
case "arch":
return true
case "cachedir":
return true
case "color":
return true
case "config":
return true
case "debug":
return true
case "gpgdir":
return true
case "hookdir":
return true
case "logfile":
return true
case "noconfirm":
return true
case "confirm":
return true
default:
return false
}
return true
}
func hasParam(arg string) bool {
switch arg {
case "dbpath", "b":
return true
case "root", "r":
return true
case "sysroot":
return true
case "config":
return true
case "ignore":
return true
case "assume-installed":
return true
case "overwrite":
return true
case "ask":
return true
case "cachedir":
return true
case "hookdir":
return true
case "logfile":
return true
case "ignoregroup":
return true
case "arch":
return true
case "print-format":
return true
case "gpgdir":
return true
case "color":
return true
//yay params
case "aururl":
case "mflags":
return true
case "gpgflags":
return true
case "gitflags":
return true
case "builddir":
return true
case "editor":
return true
case "editorflags":
return true
case "makepkg":
return true
case "makepkgconf":
case "pacman":
return true
case "tar":
return true
case "git":
return true
case "gpg":
return true
case "requestsplitn":
return true
case "answerclean":
return true
case "answerdiff":
return true
case "answeredit":
return true
case "answerupgrade":
return true
case "completioninterval":
case "sortby":
return true
default:
return false
}
return true
}
// Parses short hand options such as:
@ -496,8 +748,8 @@ func (parser *arguments) parseShortOption(arg string, param string) (usedNext bo
char := string(_char)
if hasParam(char) {
if k < len(arg)-2 {
err = parser.addParam(char, arg[k+2:])
if k < len(arg)-1 {
err = parser.addParam(char, arg[k+1:])
} else {
usedNext = true
err = parser.addParam(char, param)
@ -526,7 +778,10 @@ func (parser *arguments) parseLongOption(arg string, param string) (usedNext boo
arg = arg[2:]
if hasParam(arg) {
split := strings.SplitN(arg, "=", 2)
if len(split) == 2 {
err = parser.addParam(split[0], split[1])
} else if hasParam(arg) {
err = parser.addParam(arg, param)
usedNext = true
} else {
@ -536,21 +791,15 @@ func (parser *arguments) parseLongOption(arg string, param string) (usedNext boo
return
}
func (parser *arguments) parseStdin() (err error) {
for {
var target string
_, err = fmt.Scan(&target)
func (parser *arguments) parseStdin() error {
scanner := bufio.NewScanner(os.Stdin)
scanner.Split(bufio.ScanLines)
if err != nil {
if err == io.EOF {
err = nil
}
return
}
parser.addTarget(target)
for scanner.Scan() {
parser.addTarget(scanner.Text())
}
return os.Stdin.Close()
}
func (parser *arguments) parseCommandLine() (err error) {
@ -592,10 +841,10 @@ func (parser *arguments) parseCommandLine() (err error) {
parser.op = "Y"
}
if cmdArgs.existsArg("-") {
if parser.existsArg("-") {
var file *os.File
err = cmdArgs.parseStdin()
cmdArgs.delArg("-")
err = parser.parseStdin()
parser.delArg("-")
if err != nil {
return
@ -610,10 +859,28 @@ func (parser *arguments) parseCommandLine() (err error) {
os.Stdin = file
}
cmdArgs.extractYayOptions()
return
}
//parses input for number menus
func (parser *arguments) extractYayOptions() {
for option, value := range parser.options {
if handleConfig(option, value) {
parser.delArg(option)
}
}
for option, value := range parser.globals {
if handleConfig(option, value) {
parser.delArg(option)
}
}
rpc.AURURL = strings.TrimRight(config.AURURL, "/") + "/rpc.php?"
config.AURURL = strings.TrimRight(config.AURURL, "/")
}
//parses input for number menus split by spaces or commas
//supports individual selection: 1 2 3 4
//supports range selections: 1-4 10-20
//supports negation: ^1 ^1-4
@ -629,7 +896,9 @@ func parseNumberMenu(input string) (intRanges, intRanges, stringSet, stringSet)
otherInclude := make(stringSet)
otherExclude := make(stringSet)
words := strings.Fields(input)
words := strings.FieldsFunc(input, func(c rune) bool {
return unicode.IsSpace(c) || c == ','
})
for _, word := range words {
var num1 int
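A minimal sketch of the parser's four return values for a mixed selection; the expected counts follow the behaviour described in the comment above:
func exampleParseNumberMenu() {
	include, exclude, otherInclude, otherExclude := parseNumberMenu("1-3 ^2, abort")
	// one include range (1-3), one exclude range (2-2), one word ("abort")
	fmt.Println(len(include), len(exclude), len(otherInclude), len(otherExclude)) // 1 1 1 0
}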

View file

@ -65,6 +65,7 @@ func TestParseNumberMenu(t *testing.T) {
"abort all none",
"a-b ^a-b ^abort",
"1\t2 3 4\t\t \t 5",
"1 2,3, 4, 5,6 ,7 ,8",
"",
" \t ",
"A B C D E",
@ -78,6 +79,7 @@ func TestParseNumberMenu(t *testing.T) {
{intRanges{}, intRanges{}, makeStringSet("abort", "all", "none"), make(stringSet)},
{intRanges{}, intRanges{}, makeStringSet("a-b"), makeStringSet("abort", "a-b")},
{intRanges{makeIntRange(1, 1), makeIntRange(2, 2), makeIntRange(3, 3), makeIntRange(4, 4), makeIntRange(5, 5)}, intRanges{}, make(stringSet), make(stringSet)},
{intRanges{makeIntRange(1, 1), makeIntRange(2, 2), makeIntRange(3, 3), makeIntRange(4, 4), makeIntRange(5, 5), makeIntRange(6, 6), makeIntRange(7, 7), makeIntRange(8, 8)}, intRanges{}, make(stringSet), make(stringSet)},
{intRanges{}, intRanges{}, make(stringSet), make(stringSet)},
{intRanges{}, intRanges{}, make(stringSet), make(stringSet)},
{intRanges{}, intRanges{}, makeStringSet("a", "b", "c", "d", "e"), make(stringSet)},

View file

@ -143,11 +143,12 @@ func (s repoQuery) printSearch() {
// Pretty print a set of packages from the same package base.
// Packages foo and bar from a pkgbase named base would print like so:
// base (foo bar)
func formatPkgbase(pkg *rpc.Pkg, bases map[string][]*rpc.Pkg) string {
func (base Base) String() string {
pkg := base[0]
str := pkg.PackageBase
if len(bases[pkg.PackageBase]) > 1 || pkg.PackageBase != pkg.Name {
if len(base) > 1 || pkg.PackageBase != pkg.Name {
str2 := " ("
for _, split := range bases[pkg.PackageBase] {
for _, split := range base {
str2 += split.Name + " "
}
str2 = str2[:len(str2)-1] + ")"
@ -210,18 +211,19 @@ func (do *depOrder) Print() {
}
}
for _, pkg := range do.Aur {
pkgStr := " " + pkg.PackageBase + "-" + pkg.Version
for _, base := range do.Aur {
pkg := base.Pkgbase()
pkgStr := " " + pkg + "-" + base[0].Version
pkgStrMake := pkgStr
push := false
pushMake := false
if len(do.Bases[pkg.PackageBase]) > 1 || pkg.PackageBase != pkg.Name {
if len(base) > 1 || pkg != base[0].Name {
pkgStr += " ("
pkgStrMake += " ("
for _, split := range do.Bases[pkg.PackageBase] {
for _, split := range base {
if do.Runtime.get(split.Name) {
pkgStr += split.Name + " "
aurLen++
@ -235,7 +237,7 @@ func (do *depOrder) Print() {
pkgStr = pkgStr[:len(pkgStr)-1] + ")"
pkgStrMake = pkgStrMake[:len(pkgStrMake)-1] + ")"
} else if do.Runtime.get(pkg.Name) {
} else if do.Runtime.get(base[0].Name) {
aurLen++
push = true
} else {
@ -268,6 +270,10 @@ func printDownloads(repoName string, length int, packages string) {
}
func printInfoValue(str, value string) {
if value == "" {
value = "None"
}
fmt.Printf(bold("%-16s%s")+" %s\n", str, ":", value)
}
@ -275,9 +281,12 @@ func printInfoValue(str, value string) {
func PrintInfo(a *rpc.Pkg) {
printInfoValue("Repository", "aur")
printInfoValue("Name", a.Name)
printInfoValue("Keywords", strings.Join(a.Keywords, " "))
printInfoValue("Version", a.Version)
printInfoValue("Description", a.Description)
printInfoValue("URL", a.URL)
printInfoValue("AUR URL", config.AURURL+"/packages/"+a.Name)
printInfoValue("Groups", strings.Join(a.Groups, " "))
printInfoValue("Licenses", strings.Join(a.License, " "))
printInfoValue("Provides", strings.Join(a.Provides, " "))
printInfoValue("Depends On", strings.Join(a.Depends, " "))
@ -288,12 +297,22 @@ func PrintInfo(a *rpc.Pkg) {
printInfoValue("Maintainer", a.Maintainer)
printInfoValue("Votes", fmt.Sprintf("%d", a.NumVotes))
printInfoValue("Popularity", fmt.Sprintf("%f", a.Popularity))
printInfoValue("First Submitted", formatTime(a.FirstSubmitted))
printInfoValue("Last Modified", formatTime(a.LastModified))
if a.OutOfDate != 0 {
printInfoValue("Out-of-date", "Yes ["+formatTime(a.OutOfDate)+"]")
} else {
printInfoValue("Out-of-date", "No")
}
if cmdArgs.existsDouble("i") {
printInfoValue("ID", fmt.Sprintf("%d", a.ID))
printInfoValue("Package Base ID", fmt.Sprintf("%d", a.PackageBaseID))
printInfoValue("Package Base", a.PackageBase)
printInfoValue("Snapshot URL", config.AURURL+a.URLPath)
}
fmt.Println()
}
@ -568,9 +587,9 @@ func colourHash(name string) (output string) {
if !useColor {
return name
}
var hash = 5381
var hash uint = 5381
for i := 0; i < len(name); i++ {
hash = int(name[i]) + ((hash << 5) + (hash))
hash = uint(name[i]) + ((hash << 5) + (hash))
}
return fmt.Sprintf("\x1b[%dm%s\x1b[0m", hash%6+31, name)
}
@ -595,8 +614,8 @@ func providerMenu(dep string, providers providers) *rpc.Pkg {
fmt.Print("\nEnter a number (default=1): ")
if config.NoConfirm {
fmt.Println()
break
fmt.Println("1")
return providers.Pkgs[0]
}
reader := bufio.NewReader(os.Stdin)

View file

@ -11,7 +11,7 @@ func benchmarkPrintSearch(search string, b *testing.B) {
os.Stdout = w
for n := 0; n < b.N; n++ {
res, _, _ := queryRepo(append([]string{}, search))
res, _ := queryRepo(append([]string{}, search))
res.printSearch()
}
os.Stdout = old

View file

@ -169,7 +169,7 @@ func syncSearch(pkgS []string) (err error) {
aq, aurErr = narrowSearch(pkgS, true)
}
if mode == ModeRepo || mode == ModeAny {
pq, _, repoErr = queryRepo(pkgS)
pq, repoErr = queryRepo(pkgS)
if repoErr != nil {
return err
}
@ -229,7 +229,7 @@ func syncInfo(pkgS []string) (err error) {
arguments := cmdArgs.copy()
arguments.clearTargets()
arguments.addTarget(repoS...)
err = passToPacman(arguments)
err = show(passToPacman(arguments))
if err != nil {
return
@ -254,52 +254,29 @@ func syncInfo(pkgS []string) (err error) {
}
// Search handles repo searches. Creates a RepoSearch struct.
func queryRepo(pkgInputN []string) (s repoQuery, n int, err error) {
func queryRepo(pkgInputN []string) (s repoQuery, err error) {
dbList, err := alpmHandle.SyncDbs()
if err != nil {
return
}
// BottomUp functions
initL := func(len int) int {
if config.SortMode == TopDown {
return 0
dbList.ForEach(func(db alpm.Db) error {
if len(pkgInputN) == 0 {
pkgs := db.PkgCache()
s = append(s, pkgs.Slice()...)
} else {
pkgs := db.Search(pkgInputN)
s = append(s, pkgs.Slice()...)
}
return len - 1
}
compL := func(len int, i int) bool {
if config.SortMode == TopDown {
return i < len
return nil
})
if config.SortMode == BottomUp {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
return i > -1
}
finalL := func(i int) int {
if config.SortMode == TopDown {
return i + 1
}
return i - 1
}
dbS := dbList.Slice()
lenDbs := len(dbS)
for f := initL(lenDbs); compL(lenDbs, f); f = finalL(f) {
pkgS := dbS[f].PkgCache().Slice()
lenPkgs := len(pkgS)
for i := initL(lenPkgs); compL(lenPkgs, i); i = finalL(i) {
match := true
for _, pkgN := range pkgInputN {
if !(strings.Contains(pkgS[i].Name(), pkgN) || strings.Contains(strings.ToLower(pkgS[i].Description()), pkgN)) {
match = false
break
}
}
if match {
n++
s = append(s, pkgS[i])
}
}
}
return
}
@ -500,16 +477,13 @@ func aurInfo(names []string, warnings *aurWarnings) ([]*rpc.Pkg, error) {
seen := make(map[string]int)
var mux sync.Mutex
var wg sync.WaitGroup
var err error
var errs MultiError
makeRequest := func(n, max int) {
defer wg.Done()
tempInfo, requestErr := rpc.Info(names[n:max])
if err != nil {
return
}
errs.Add(requestErr)
if requestErr != nil {
err = requestErr
return
}
mux.Lock()
@ -528,7 +502,7 @@ func aurInfo(names []string, warnings *aurWarnings) ([]*rpc.Pkg, error) {
wg.Wait()
if err != nil {
if err := errs.Return(); err != nil {
return info, err
}

27
testdata/pacman.conf vendored Normal file
View file

@ -0,0 +1,27 @@
[options]
RootDir = /
CacheDir = /cachedir/ /another/
LogFile = /logfile
GPGDir = /gpgdir/
HookDir = /hookdir/
UseDelta = 0.5
Architecture = 8086
IgnorePkg = ignore
IgnorePkg = this
IgnorePkg = package
IgnoreGroup = ignore
IgnoreGroup = this
IgnoreGroup = group
NoUpgrade = noupgrade
NoExtract = noextract
CheckSpace
TotalDownload
ILoveCandy
VerbosePkgLists
[repo1]
Server = repo1
[repo2]
Server = repo2

View file

@ -3,13 +3,11 @@ package main
import (
"fmt"
"sort"
"strings"
"sync"
"unicode"
alpm "github.com/jguer/go-alpm"
rpc "github.com/mikkeloscar/aur"
pkgb "github.com/mikkeloscar/gopkgbuild"
)
// upgrade type describes a system upgrade.
@ -63,69 +61,54 @@ func (u upSlice) Less(i, j int) bool {
}
func getVersionDiff(oldVersion, newversion string) (left, right string) {
old, errOld := pkgb.NewCompleteVersion(oldVersion)
new, errNew := pkgb.NewCompleteVersion(newversion)
if errOld != nil {
left = red("Invalid Version")
}
if errNew != nil {
right = red("Invalid Version")
func getVersionDiff(oldVersion, newVersion string) (left, right string) {
if oldVersion == newVersion {
return oldVersion + red(""), newVersion + green("")
}
if errOld == nil && errNew == nil {
oldVersion := old.String()
newVersion := new.String()
diffPosition := 0
if oldVersion == newVersion {
return oldVersion, newVersion
}
diffPosition := 0
checkWords := func(str string, index int, words ...string) bool {
for _, word := range words {
wordLength := len(word)
nextIndex := index + 1
if (index < len(str)-wordLength) &&
(str[nextIndex:(nextIndex+wordLength)] == word) {
return true
}
}
return false
}
for index, char := range oldVersion {
charIsSpecial := !(unicode.IsLetter(char) || unicode.IsNumber(char))
if (index >= len(newVersion)) || (char != rune(newVersion[index])) {
if charIsSpecial {
diffPosition = index
}
break
}
if charIsSpecial ||
(((index == len(oldVersion)-1) || (index == len(newVersion)-1)) &&
((len(oldVersion) != len(newVersion)) ||
(oldVersion[index] == newVersion[index]))) ||
checkWords(oldVersion, index, "rc", "pre", "alpha", "beta") {
diffPosition = index + 1
checkWords := func(str string, index int, words ...string) bool {
for _, word := range words {
wordLength := len(word)
nextIndex := index + 1
if (index < len(str)-wordLength) &&
(str[nextIndex:(nextIndex+wordLength)] == word) {
return true
}
}
samePart := oldVersion[0:diffPosition]
left = samePart + red(oldVersion[diffPosition:])
right = samePart + green(newVersion[diffPosition:])
return false
}
for index, char := range oldVersion {
charIsSpecial := !(unicode.IsLetter(char) || unicode.IsNumber(char))
if (index >= len(newVersion)) || (char != rune(newVersion[index])) {
if charIsSpecial {
diffPosition = index
}
break
}
if charIsSpecial ||
(((index == len(oldVersion)-1) || (index == len(newVersion)-1)) &&
((len(oldVersion) != len(newVersion)) ||
(oldVersion[index] == newVersion[index]))) ||
checkWords(oldVersion, index, "rc", "pre", "alpha", "beta") {
diffPosition = index + 1
}
}
samePart := oldVersion[0:diffPosition]
left = samePart + red(oldVersion[diffPosition:])
right = samePart + green(newVersion[diffPosition:])
return
}
// upList returns lists of packages to upgrade from each source.
func upList(warnings *aurWarnings) (aurUp upSlice, repoUp upSlice, err error) {
func upList(warnings *aurWarnings) (upSlice, upSlice, error) {
local, remote, _, remoteNames, err := filterPackages()
if err != nil {
return nil, nil, err
@ -133,55 +116,55 @@ func upList(warnings *aurWarnings) (aurUp upSlice, repoUp upSlice, err error) {
var wg sync.WaitGroup
var develUp upSlice
var repoUp upSlice
var aurUp upSlice
var repoErr error
var aurErr error
var develErr error
var errs MultiError
pkgdata := make(map[string]*rpc.Pkg)
aurdata := make(map[string]*rpc.Pkg)
if mode == ModeAny || mode == ModeRepo {
fmt.Println(bold(cyan("::") + bold(" Searching databases for updates...")))
wg.Add(1)
go func() {
repoUp, repoErr = upRepo(local)
repoUp, err = upRepo(local)
errs.Add(err)
wg.Done()
}()
}
if mode == ModeAny || mode == ModeAUR {
fmt.Println(bold(cyan("::") + bold(" Searching AUR for updates...")))
wg.Add(1)
go func() {
aurUp, aurErr = upAUR(remote, remoteNames, pkgdata, warnings)
wg.Done()
}()
if config.Devel {
fmt.Println(bold(cyan("::") + bold(" Checking development packages...")))
var _aurdata []*rpc.Pkg
_aurdata, err = aurInfo(remoteNames, warnings)
errs.Add(err)
if err == nil {
for _, pkg := range _aurdata {
aurdata[pkg.Name] = pkg
}
wg.Add(1)
go func() {
develUp, develErr = upDevel(remote)
aurUp, err = upAUR(remote, aurdata)
errs.Add(err)
wg.Done()
}()
if config.Devel {
fmt.Println(bold(cyan("::") + bold(" Checking development packages...")))
wg.Add(1)
go func() {
develUp = upDevel(remote, aurdata)
wg.Done()
}()
}
}
}
wg.Wait()
printLocalNewerThanAUR(remote, pkgdata)
errs := make([]string, 0)
for _, e := range []error{repoErr, aurErr, develErr} {
if e != nil {
errs = append(errs, e.Error())
}
}
if len(errs) > 0 {
err = fmt.Errorf("%s", strings.Join(errs, "\n"))
return nil, nil, err
}
printLocalNewerThanAUR(remote, aurdata)
if develUp != nil {
names := make(stringSet)
@ -197,10 +180,10 @@ func upList(warnings *aurWarnings) (aurUp upSlice, repoUp upSlice, err error) {
aurUp = develUp
}
return aurUp, repoUp, err
return aurUp, repoUp, errs.Return()
}
func upDevel(remote []alpm.Package) (toUpgrade upSlice, err error) {
func upDevel(remote []alpm.Package, aurdata map[string]*rpc.Pkg) (toUpgrade upSlice) {
toUpdate := make([]alpm.Package, 0)
toRemove := make([]string, 0)
@ -212,12 +195,14 @@ func upDevel(remote []alpm.Package) (toUpgrade upSlice, err error) {
defer wg.Done()
if e.needsUpdate() {
for _, pkg := range remote {
if pkg.Name() == vcsName {
mux1.Lock()
toUpdate = append(toUpdate, pkg)
mux1.Unlock()
return
if _, ok := aurdata[vcsName]; ok {
for _, pkg := range remote {
if pkg.Name() == vcsName {
mux1.Lock()
toUpdate = append(toUpdate, pkg)
mux1.Unlock()
return
}
}
}
@ -248,22 +233,11 @@ func upDevel(remote []alpm.Package) (toUpgrade upSlice, err error) {
// upAUR gathers foreign packages and checks if they have new versions.
// Output: Upgrade type package list.
func upAUR(
remote []alpm.Package, remoteNames []string,
pkgdata map[string]*rpc.Pkg, warnings *aurWarnings) (upSlice, error) {
func upAUR(remote []alpm.Package, aurdata map[string]*rpc.Pkg) (upSlice, error) {
toUpgrade := make(upSlice, 0)
_pkgdata, err := aurInfo(remoteNames, warnings)
if err != nil {
return nil, err
}
for _, pkg := range _pkgdata {
pkgdata[pkg.Name] = pkg
}
for _, pkg := range remote {
aurPkg, ok := pkgdata[pkg.Name()]
aurPkg, ok := aurdata[pkg.Name()]
if !ok {
continue
}
@ -284,28 +258,29 @@ func upAUR(
func printIgnoringPackage(pkg alpm.Package, newPkgVersion string) {
left, right := getVersionDiff(pkg.Version(), newPkgVersion)
fmt.Println(
yellow(bold(smallArrow)) + fmt.Sprintf(
" Ignoring package upgrade: %s (%s -> %s)",
cyan(pkg.Name()), left, right))
fmt.Printf("%s %s: ignoring package upgrade (%s => %s)\n",
yellow(bold(smallArrow)),
cyan(pkg.Name()),
left, right,
)
}
func printLocalNewerThanAUR(
remote []alpm.Package, pkgdata map[string]*rpc.Pkg) {
remote []alpm.Package, aurdata map[string]*rpc.Pkg) {
for _, pkg := range remote {
aurPkg, ok := pkgdata[pkg.Name()]
aurPkg, ok := aurdata[pkg.Name()]
if !ok {
continue
}
left, right := getVersionDiff(pkg.Version(), aurPkg.Version)
if !isDevelName(pkg.Name()) &&
alpm.VerCmp(pkg.Version(), aurPkg.Version) > 0 {
fmt.Println(
yellow(bold(smallArrow)) + fmt.Sprintf(
" Local package is newer than AUR: %s (%s -> %s)",
cyan(pkg.Name()), left, right))
if !isDevelName(pkg.Name()) && alpm.VerCmp(pkg.Version(), aurPkg.Version) > 0 {
fmt.Printf("%s %s: local (%s) is newer than AUR (%s)\n",
yellow(bold(smallArrow)),
cyan(pkg.Name()),
left, right,
)
}
}
}
@ -313,23 +288,38 @@ func printLocalNewerThanAUR(
// upRepo gathers local packages and checks if they have new versions.
// Output: Upgrade type package list.
func upRepo(local []alpm.Package) (upSlice, error) {
dbList, err := alpmHandle.SyncDbs()
if err != nil {
return nil, err
}
slice := upSlice{}
for _, pkg := range local {
newPkg := pkg.NewVersion(dbList)
if newPkg != nil {
if pkg.ShouldIgnore() {
printIgnoringPackage(pkg, newPkg.Version())
} else {
slice = append(slice, upgrade{pkg.Name(), newPkg.DB().Name(), pkg.Version(), newPkg.Version()})
}
}
localDB, err := alpmHandle.LocalDb()
if err != nil {
return slice, err
}
err = alpmHandle.TransInit(alpm.TransFlagNoLock)
if err != nil {
return slice, err
}
defer alpmHandle.TransRelease()
alpmHandle.SyncSysupgrade(cmdArgs.existsDouble("u", "sysupgrade"))
alpmHandle.TransGetAdd().ForEach(func(pkg alpm.Package) error {
localPkg, err := localDB.PkgByName(pkg.Name())
localVer := "-"
if err == nil {
localVer = localPkg.Version()
}
slice = append(slice, upgrade{
pkg.Name(),
pkg.DB().Name(),
localVer,
pkg.Version(),
})
return nil
})
return slice, nil
}

View file

@ -32,7 +32,7 @@ func TestGetVersionDiff(t *testing.T) {
}
out := []versionPair{
{"1-1", "1-1"},
{"1-1" + red(""), "1-1" + green("")},
{red("1-1"), green("2-1")},
{red("2-1"), green("1-1")},
{"1-" + red("1"), "1-" + green("2")},
@ -56,7 +56,7 @@ func TestGetVersionDiff(t *testing.T) {
o, n := getVersionDiff(pair.Old, pair.New)
if o != out[i].Old || n != out[i].New {
t.Errorf("Test %d failed for update: (%s => %s) expected (%s => %s) got (%s => %s)", i+1, in[i].Old, in[i].New, out[i].Old, out[i].New, o, n)
t.Errorf("Test %d failed for update: expected (%s => %s) got (%s => %s) %d %d %d %d", i+1, in[i].Old, in[i].New, o, n, len(in[i].Old), len(in[i].New), len(o), len(n))
}
}
}

View file

@ -2,6 +2,7 @@ package main
import (
"fmt"
"sync"
"unicode"
)
@ -128,3 +129,36 @@ func removeInvalidTargets(targets []string) []string {
return filteredTargets
}
type MultiError struct {
Errors []error
mux sync.Mutex
}
func (err *MultiError) Error() string {
str := ""
for _, e := range err.Errors {
str += e.Error() + "\n"
}
return str[:len(str)-1]
}
func (err *MultiError) Add(e error) {
if e == nil {
return
}
err.mux.Lock()
err.Errors = append(err.Errors, e)
err.mux.Unlock()
}
func (err *MultiError) Return() error {
if len(err.Errors) > 0 {
return err
}
return nil
}
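
The MultiError type above is what the goroutines in aurInfo and upList feed: each one calls Add with whatever error it got, and the caller collapses the result with Return. A minimal sketch of that pattern, assuming the type above is in scope and with doWork standing in for any fallible call:

package main

import (
	"fmt"
	"sync"
)

// doWork is a hypothetical stand-in for any call that may fail.
func doWork(n int) error {
	if n%2 == 1 {
		return fmt.Errorf("task %d failed", n)
	}
	return nil
}

// runAll fans tasks out to goroutines and folds their failures into a
// single error; it assumes the MultiError type defined above is in scope.
func runAll(count int) error {
	var errs MultiError
	var wg sync.WaitGroup
	for i := 0; i < count; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			errs.Add(doWork(n)) // Add ignores nil errors and locks internally
		}(i)
	}
	wg.Wait()
	return errs.Return() // nil when no goroutine reported an error
}

func main() {
	fmt.Println(runAll(4))
}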

86
vcs.go
View file

@ -5,12 +5,11 @@ import (
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
"sync"
"time"
rpc "github.com/mikkeloscar/aur"
gopkg "github.com/mikkeloscar/gopkgbuild"
gosrc "github.com/Morganamilo/go-srcinfo"
)
// Info contains the last commit sha of a repo
@ -24,8 +23,8 @@ type shaInfo struct {
// createDevelDB forces yay to create a DB of the existing development packages
func createDevelDB() error {
infoMap := make(map[string]*rpc.Pkg)
srcinfosStale := make(map[string]*gopkg.PKGBUILD)
var mux sync.Mutex
var wg sync.WaitGroup
_, _, _, remoteNames, err := filterPackages()
if err != nil {
@ -37,39 +36,25 @@ func createDevelDB() error {
return err
}
for _, pkg := range info {
infoMap[pkg.Name] = pkg
}
bases := getBases(info)
toSkip := pkgbuildsToSkip(bases, sliceToStringSet(remoteNames))
downloadPkgbuilds(bases, toSkip, config.BuildDir)
srcinfos, _ := parseSrcinfoFiles(bases, false)
bases := getBases(infoMap)
toSkip := pkgBuildsToSkip(info, sliceToStringSet(remoteNames))
downloadPkgBuilds(info, bases, toSkip)
tryParsesrcinfosFile(info, srcinfosStale, bases)
for _, pkg := range info {
pkgbuild, ok := srcinfosStale[pkg.PackageBase]
if !ok {
continue
}
for _, pkg := range bases[pkg.PackageBase] {
updateVCSData(pkg.Name, pkgbuild.Source)
for _, pkgbuild := range srcinfos {
for _, pkg := range pkgbuild.Packages {
wg.Add(1)
go updateVCSData(pkg.Pkgname, pkgbuild.Source, &mux, &wg)
}
}
wg.Wait()
fmt.Println(bold(yellow(arrow) + bold(" GenDB finished. No packages were installed")))
return err
}
// parseSource returns the git url, default branch and protocols it supports
func parseSource(source string) (url string, branch string, protocols []string) {
if !(strings.Contains(source, "git://") ||
strings.Contains(source, ".git") ||
strings.Contains(source, "git+https://")) {
return "", "", nil
}
split := strings.Split(source, "::")
source = split[len(split)-1]
split = strings.SplitN(source, "://", 2)
@ -77,8 +62,20 @@ func parseSource(source string) (url string, branch string, protocols []string)
if len(split) != 2 {
return "", "", nil
}
protocols = strings.Split(split[0], "+")
git := false
for _, protocol := range protocols {
if protocol == "git" {
git = true
break
}
}
if !git {
return "", "", nil
}
split = strings.SplitN(split[1], "#", 2)
if len(split) == 2 {
secondSplit := strings.SplitN(split[1], "=", 2)
@ -97,27 +94,35 @@ func parseSource(source string) (url string, branch string, protocols []string)
branch = "HEAD"
}
url = strings.Split(url, "?")[0]
branch = strings.Split(branch, "?")[0]
return
}
func updateVCSData(pkgName string, sources []string) {
func updateVCSData(pkgName string, sources []gosrc.ArchString, mux *sync.Mutex, wg *sync.WaitGroup) {
defer wg.Done()
if savedInfo == nil {
mux.Lock()
savedInfo = make(vcsInfo)
mux.Unlock()
}
info := make(shaInfos)
for _, source := range sources {
url, branch, protocols := parseSource(source)
checkSource := func(source gosrc.ArchString) {
defer wg.Done()
url, branch, protocols := parseSource(source.Value)
if url == "" || branch == "" {
continue
return
}
commit := getCommit(url, branch, protocols)
if commit == "" {
continue
return
}
mux.Lock()
info[url] = shaInfo{
protocols,
branch,
@ -125,9 +130,14 @@ func updateVCSData(pkgName string, sources []string) {
}
savedInfo[pkgName] = info
fmt.Println(bold(yellow(arrow)) + " Found git repo: " + cyan(url))
saveVCSInfo()
mux.Unlock()
}
for _, source := range sources {
wg.Add(1)
go checkSource(source)
}
}
@ -135,9 +145,9 @@ func getCommit(url string, branch string, protocols []string) string {
for _, protocol := range protocols {
var outbuf bytes.Buffer
cmd := exec.Command(config.GitBin, "ls-remote", protocol+"://"+url, branch)
cmd := passToGit("", "ls-remote", protocol+"://"+url, branch)
cmd.Stdout = &outbuf
cmd.Env = append(cmd.Env, "GIT_TERMINAL_PROMPT=0")
cmd.Env = append(os.Environ(), "GIT_TERMINAL_PROMPT=0")
err := cmd.Start()
if err != nil {

View file

@ -49,7 +49,7 @@ func TestParsing(t *testing.T) {
{"github.com/davidgiven/ack", "HEAD", []string{"git"}},
{"", "", nil},
{"", "", nil},
{"github.com/jguer/yay.git", "foo", []string{"a", "b", "c", "d", "e", "f"}},
{"", "", nil},
}
for n, url := range urls {
@ -60,7 +60,7 @@ func TestParsing(t *testing.T) {
branch != compare.Branch ||
!isEqual(protocols, compare.Protocols) {
t.Fatalf("Test %d failed: Expected: url=%+v branch=%+v protocols=%+v\ngot url=%+v branch=%+v protocols=%+v", n+1, url, branch, protocols, compare.URL, compare.Branch, compare.Protocols)
t.Fatalf("Test %d failed: Expected: url=%+v branch=%+v protocols=%+v\ngot url=%+v branch=%+v protocols=%+v", n+1, compare.URL, compare.Branch, compare.Protocols, url, branch, protocols)
}
}
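
For orientation, a sketch of what the reworked parseSource is expected to return for a typical VCS source entry. The values are inferred from the splitting logic shown above (part of the function body is elided in this hunk), so treat them as illustrative rather than authoritative; the snippet assumes parseSource and the fmt package are in scope:

func exampleParseSource() {
	url, branch, protocols := parseSource("git+https://github.com/davidgiven/ack")
	fmt.Println(url)       // github.com/davidgiven/ack
	fmt.Println(branch)    // HEAD (no #branch= fragment, so the default is used)
	fmt.Println(protocols) // [git https]
}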

20
vendor/github.com/Morganamilo/go-pacmanconf/cmd.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
package pacmanconf
import (
"bytes"
"os/exec"
)
func pacmanconf(args []string) (string, string, error) {
var outbuf, errbuf bytes.Buffer
cmd := exec.Command("pacman-conf", args...)
cmd.Stdout = &outbuf
cmd.Stderr = &errbuf
err := cmd.Run()
stdout := outbuf.String()
stderr := errbuf.String()
return stdout, stderr, err
}

65
vendor/github.com/Morganamilo/go-pacmanconf/ini/ini.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
package ini
import (
"strings"
"io/ioutil"
)
type Callback func(fileName string, line int, section string,
key string, value string, data interface{}) error
func Parse(ini string, cb Callback, data interface{}) error {
return parse("", ini, cb, data)
}
func ParseFile(fileName string, cb Callback, data interface{}) error {
file, err := ioutil.ReadFile(fileName)
if err != nil {
return cb(fileName, -1, err.Error(), "", "", data)
}
return parse(fileName, string(file), cb, data)
}
func parse(fileName string, ini string, cb Callback, data interface{}) error {
lines := strings.Split(ini, "\n")
header := ""
for n, line := range lines {
line = strings.TrimSpace(line)
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
runes := []rune(line)
header = string(runes[1 : len(runes)-1])
if err := cb(fileName, n, header, "", "", data); err != nil {
return err
}
continue
}
key, value := splitPair(line)
if err := cb(fileName, n, header, key, value, data); err != nil {
return err
}
}
return nil
}
func splitPair(line string) (string, string) {
split := strings.SplitN(line, "=", 2)
key := strings.TrimSpace(split[0])
if len(split) == 1 {
return key, ""
}
value := strings.TrimSpace(split[1])
return key, value
}
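
Since the package is callback-driven, a small example makes the flow clearer. The snippet below is a sketch, not part of the vendored code; it simply prints what the callback receives for an in-memory config string:

package main

import (
	"fmt"

	"github.com/Morganamilo/go-pacmanconf/ini"
)

func main() {
	data := "[options]\nColor\nIgnorePkg = linux\n"
	// The callback receives every section header and key/value pair in order.
	err := ini.Parse(data, func(fileName string, line int, section, key, value string, _ interface{}) error {
		if key == "" && value == "" {
			fmt.Printf("section %q\n", section)
		} else {
			fmt.Printf("  %s = %q (section %s, line %d)\n", key, value, section, line)
		}
		return nil
	}, nil)
	if err != nil {
		fmt.Println("parse error:", err)
	}
}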

View file

@ -0,0 +1,46 @@
package pacmanconf
type Repository struct {
Name string
Servers []string
SigLevel []string
Usage []string
}
type Config struct {
RootDir string
DBPath string
CacheDir []string
HookDir []string
GPGDir string
LogFile string
HoldPkg []string
IgnorePkg []string
IgnoreGroup []string
Architecture string
XferCommand string
NoUpgrade []string
NoExtract []string
CleanMethod []string
SigLevel []string
LocalFileSigLevel []string
RemoteFileSigLevel []string
UseSyslog bool
Color bool
UseDelta float64
TotalDownload bool
CheckSpace bool
VerbosePkgLists bool
DisableDownloadTimeout bool
Repos []Repository
}
func (conf *Config) Repository(name string) *Repository {
for _, repo := range conf.Repos {
if repo.Name == name {
return &repo
}
}
return nil
}

137
vendor/github.com/Morganamilo/go-pacmanconf/parser.go generated vendored Normal file
View file

@ -0,0 +1,137 @@
package pacmanconf
import (
"fmt"
"github.com/Morganamilo/go-pacmanconf/ini"
"strconv"
)
type callbackData struct {
conf *Config
repo *Repository
}
func parseCallback(fileName string, line int, section string,
key string, value string, data interface{}) error {
if line < 0 {
return fmt.Errorf("unable to read file: %s: %s", fileName, section)
}
d, ok := data.(*callbackData)
if !ok {
return fmt.Errorf("type assert failed when parsing: %s", fileName)
}
if key == "" && value == "" {
if section == "options" {
d.repo = nil
} else {
d.conf.Repos = append(d.conf.Repos, Repository{})
d.repo = &d.conf.Repos[len(d.conf.Repos)-1]
d.repo.Name = section
}
return nil
}
if section == "" {
return fmt.Errorf("line %d is not in a section: %s", line, fileName)
}
if d.repo == nil {
setOption(d.conf, key, value)
} else {
setRepo(d.repo, key, value)
}
return nil
}
func setRepo(repo *Repository, key string, value string) {
switch key {
case "Server":
repo.Servers = append(repo.Servers, value)
case "SigLevel":
repo.SigLevel = append(repo.SigLevel, value)
case "Usage":
repo.Usage = append(repo.Usage, value)
}
}
func setOption(conf *Config, key string, value string) {
switch key {
case "RootDir":
conf.RootDir = value
case "DBPath":
conf.DBPath = value
case "CacheDir":
conf.CacheDir = append(conf.CacheDir, value)
case "HookDir":
conf.HookDir = append(conf.HookDir, value)
case "GPGDir":
conf.GPGDir = value
case "LogFile":
conf.LogFile = value
case "HoldPkg":
conf.HoldPkg = append(conf.HoldPkg, value)
case "IgnorePkg":
conf.IgnorePkg = append(conf.IgnorePkg, value)
case "IgnoreGroup":
conf.IgnoreGroup = append(conf.IgnoreGroup, value)
case "Architecture":
conf.Architecture = value
case "XferCommand":
conf.XferCommand = value
case "NoUpgrade":
conf.NoUpgrade = append(conf.NoUpgrade, value)
case "NoExtract":
conf.NoExtract = append(conf.NoExtract, value)
case "CleanMethod":
conf.CleanMethod = append(conf.CleanMethod, value)
case "SigLevel":
conf.SigLevel = append(conf.SigLevel, value)
case "LocalFileSigLevel":
conf.LocalFileSigLevel = append(conf.LocalFileSigLevel, value)
case "RemoteFileSigLevel":
conf.RemoteFileSigLevel = append(conf.RemoteFileSigLevel, value)
case "UseSyslog":
conf.UseSyslog = true
case "Color":
conf.Color = true
case "UseDelta":
f, err := strconv.ParseFloat(value, 64)
if err == nil {
conf.UseDelta = f
}
case "TotalDownload":
conf.TotalDownload = true
case "CheckSpace":
conf.CheckSpace = true
case "VerbosePkgLists":
conf.VerbosePkgLists = true
case "DisableDownloadTimeout":
conf.DisableDownloadTimeout = true
}
}
func Parse(iniData string) (*Config, error) {
data := callbackData{&Config{}, nil}
err := ini.Parse(iniData, parseCallback, &data)
return data.conf, err
}
func PacmanConf(args ...string) (*Config, string, error) {
stdout, stderr, err := pacmanconf(args)
if err != nil {
return nil, stderr, err
}
conf, err := Parse(stdout)
return conf, "", err
}
func ParseFile(path string) (*Config, string, error) {
return PacmanConf("--config", path)
}
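
Putting the pieces together, a sketch of how a caller could load a config through this package. Note that ParseFile shells out to pacman-conf rather than reading the file directly, so the binary must be on PATH; the path and the printed fields here are illustrative only:

package main

import (
	"fmt"

	pacmanconf "github.com/Morganamilo/go-pacmanconf"
)

func main() {
	// ParseFile runs pacman-conf --config <path> and parses its output.
	conf, stderr, err := pacmanconf.ParseFile("testdata/pacman.conf")
	if err != nil {
		fmt.Println("pacman-conf failed:", stderr, err)
		return
	}
	fmt.Println("Architecture:", conf.Architecture)
	fmt.Println("IgnorePkg:", conf.IgnorePkg)
	for _, repo := range conf.Repos {
		fmt.Println("repo:", repo.Name, repo.Servers)
	}
	// Repository looks a repo up by name and returns nil if it is missing.
	if repo := conf.Repository("repo1"); repo != nil {
		fmt.Println("repo1 servers:", repo.Servers)
	}
}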

674
vendor/github.com/Morganamilo/go-srcinfo/LICENSE generated vendored Normal file
View file

@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

38
vendor/github.com/Morganamilo/go-srcinfo/line_error.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package srcinfo
import (
"fmt"
)
// LineError is an error type that stores the line number at which an error
// occurred, as well as the full Line that caused the error and an error string.
type LineError struct {
LineNumber int // The line number at which the error occurred
Line string // The line that caused the error
ErrorStr string // An error string
}
// Error Returns an error string in the format:
// "Line <LineNumber>: <ErrorStr>: <Line>".
func (le LineError) Error() string {
return fmt.Sprintf("Line %d: %s: %s", le.LineNumber, le.ErrorStr, le.Line)
}
// Error Returns a new LineError
func Error(LineNumber int, Line string, ErrorStr string) *LineError {
return &LineError{
LineNumber,
Line,
ErrorStr,
}
}
// Errorf Returns a new LineError using the same formatting rules as
// fmt.Printf.
func Errorf(LineNumber int, Line string, ErrorStr string, args ...interface{}) *LineError {
return &LineError{
LineNumber,
Line,
fmt.Sprintf(ErrorStr, args...),
}
}
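
A short sketch of how these constructors could be used by a caller reporting a malformed line; the message and line content are made up for illustration:

package main

import (
	"fmt"

	srcinfo "github.com/Morganamilo/go-srcinfo"
)

func main() {
	// Errorf builds a LineError whose message is formatted fmt.Printf-style.
	err := srcinfo.Errorf(3, "pkgname", "key %q has no value", "pkgname")
	fmt.Println(err) // Line 3: key "pkgname" has no value: pkgname
}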

310
vendor/github.com/Morganamilo/go-srcinfo/parser.go generated vendored Normal file
View file

@ -0,0 +1,310 @@
package srcinfo
import (
"fmt"
"io/ioutil"
"strings"
)
// parser is used to track our current state as we parse the srcinfo.
type parser struct {
// srcinfo is a pointer to the Srcinfo we are currently building.
srcinfo *Srcinfo
// seenPkgnames is a set of pkgnames we have seen
seenPkgnames map[string]struct{}
}
func (psr *parser) currentPackage() (*Package, error) {
if psr.srcinfo.Pkgbase == "" {
return nil, fmt.Errorf("Not in pkgbase or pkgname")
} else if len(psr.srcinfo.Packages) == 0 {
return &psr.srcinfo.Package, nil
} else {
return &psr.srcinfo.Packages[len(psr.srcinfo.Packages) - 1], nil
}
}
func (psr *parser) setHeaderOrField(key, value string) error {
pkgbase := &psr.srcinfo.PackageBase
switch key {
case "pkgbase":
if psr.srcinfo.Pkgbase != "" {
return fmt.Errorf("key \"%s\" can not occur after pkgbase or pkgname", key)
}
pkgbase.Pkgbase = value
return nil
case "pkgname":
if psr.srcinfo.Pkgbase == "" {
return fmt.Errorf("key \"%s\" can not occur before pkgbase", key)
}
if _, ok := psr.seenPkgnames[value]; ok {
return fmt.Errorf("pkgname \"%s\" can not occur more than once", value)
}
psr.seenPkgnames[value] = struct{}{}
psr.srcinfo.Packages = append(psr.srcinfo.Packages, Package{Pkgname: value})
return nil
}
if psr.srcinfo.Pkgbase == "" {
return fmt.Errorf("key \"%s\" can not occur before pkgbase or pkgname", key)
}
return psr.setField(key, value)
}
func (psr *parser) setField(archKey, value string) error {
pkg, err := psr.currentPackage()
if err != nil {
return err
}
pkgbase := &psr.srcinfo.PackageBase
key, arch := splitArchFromKey(archKey)
err = checkArch(psr.srcinfo.Arch, archKey, arch)
if err != nil {
return err
}
if value == "" {
value = EmptyOverride
}
// pkgbase only + not arch dependent
found := true
switch archKey {
case "pkgver":
pkgbase.Pkgver = value
case "pkgrel":
pkgbase.Pkgrel = value
case "epoch":
pkgbase.Epoch = value
case "validpgpkeys":
pkgbase.ValidPGPKeys = append(pkgbase.ValidPGPKeys, value)
case "noextract":
pkgbase.NoExtract = append(pkgbase.NoExtract, value)
default:
found = false
}
if found {
if len(psr.srcinfo.Packages) > 0 {
return fmt.Errorf("key \"%s\" can not occur after pkgname", archKey)
}
return nil
}
// pkgbase only + arch dependent
found = true
switch key {
case "source":
pkgbase.Source = append(pkgbase.Source, ArchString{arch, value})
case "md5sums":
pkgbase.MD5Sums = append(pkgbase.MD5Sums, ArchString{arch, value})
case "sha1sums":
pkgbase.SHA1Sums = append(pkgbase.SHA1Sums, ArchString{arch, value})
case "sha224sums":
pkgbase.SHA224Sums = append(pkgbase.SHA224Sums, ArchString{arch, value})
case "sha256sums":
pkgbase.SHA256Sums = append(pkgbase.SHA256Sums, ArchString{arch, value})
case "sha384sums":
pkgbase.SHA384Sums = append(pkgbase.SHA384Sums, ArchString{arch, value})
case "sha512sums":
pkgbase.SHA512Sums = append(pkgbase.SHA512Sums, ArchString{arch, value})
case "makedepends":
pkgbase.MakeDepends = append(pkgbase.MakeDepends, ArchString{arch, value})
case "checkdepends":
pkgbase.CheckDepends = append(pkgbase.CheckDepends, ArchString{arch, value})
default:
found = false
}
if found {
if len(psr.srcinfo.Packages) > 0 {
return fmt.Errorf("key \"%s\" can not occur after pkgname", archKey)
}
return nil
}
// pkgbase or pkgname + not arch dependent
found = true
switch archKey {
case "pkgdesc":
pkg.Pkgdesc = value
case "url":
pkg.URL = value
case "license":
pkg.License = append(pkg.License, value)
case "install":
pkg.Install = value
case "changelog":
pkg.Changelog = value
case "groups":
pkg.Groups = append(pkg.Groups, value)
case "arch":
pkg.Arch = append(pkg.Arch, value)
case "backup":
pkg.Backup = append(pkg.Backup, value)
case "options":
pkg.Options = append(pkg.Options, value)
default:
found = false
}
if found {
return nil
}
// pkgbase or pkgname + arch dependent
switch key {
case "depends":
pkg.Depends = append(pkg.Depends, ArchString{arch, value})
case "optdepends":
pkg.OptDepends = append(pkg.OptDepends, ArchString{arch, value})
case "conflicts":
pkg.Conflicts = append(pkg.Conflicts, ArchString{arch, value})
case "provides":
pkg.Provides = append(pkg.Provides, ArchString{arch, value})
case "replaces":
pkg.Replaces = append(pkg.Replaces, ArchString{arch, value})
default:
return fmt.Errorf("Unknown key: \"%s\"", archKey)
}
return nil
}
func parse(data string) (*Srcinfo, error) {
psr := &parser{
&Srcinfo{},
make(map[string]struct{}),
}
lines := strings.Split(data, "\n")
for n, line := range lines {
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
key, value, err := splitPair(line)
if err != nil {
return nil, Error(n+1, line, err.Error())
}
err = psr.setHeaderOrField(key, value)
if err != nil {
return nil, Error(n+1, line, err.Error())
}
}
if psr.srcinfo.Pkgbase == "" {
return nil, fmt.Errorf("No pkgbase field")
}
if len(psr.srcinfo.Packages) == 0 {
return nil, fmt.Errorf("No pkgname field")
}
if psr.srcinfo.Pkgver == "" {
return nil, fmt.Errorf("No pkgver field")
}
if psr.srcinfo.Pkgrel == "" {
return nil, fmt.Errorf("No pkgrel field")
}
if len(psr.srcinfo.Arch) == 0 {
return nil, fmt.Errorf("No arch field")
}
return psr.srcinfo, nil
}
// splitPair splits a key-value string in the form of "key = value",
// ignoring surrounding whitespace. The key and the value are returned.
func splitPair(line string) (string, string, error) {
split := strings.SplitN(line, "=", 2)
if len(split) != 2 {
return "", "", fmt.Errorf("Line does not contain =")
}
key := strings.TrimSpace(split[0])
value := strings.TrimSpace(split[1])
if key == "" {
return "", "", fmt.Errorf("Key is empty")
}
return key, value, nil
}
// splitArchFromKey splits up architecture dependent field names, separating
// the field name from the architecture they depend on.
func splitArchFromKey(key string) (string, string) {
split := strings.SplitN(key, "_", 2)
if len(split) == 2 {
return split[0], split[1]
}
return split[0], ""
}
// checkArch checks that the arch from an arch dependent string is actually
// defined inside the srcinfo and specifically disallows the arch "any" as it
// is not a real arch.
func checkArch(arches []string, key string, arch string) error {
if arch == "" {
return nil
}
if arch == "any" {
return fmt.Errorf("Invalid key \"%s\" arch \"%s\" is not allowed", key, arch)
}
for _, a := range arches {
if a == arch {
return nil
}
}
return fmt.Errorf("Invalid key \"%s\" unsupported arch \"%s\"", key, arch)
}
// ParseFile parses a srcinfo file as specified by path.
func ParseFile(path string) (*Srcinfo, error) {
file, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Unable to read file: %s: %s", path, err.Error())
}
return Parse(string(file))
}
// Parse parses a srcinfo in string form. Parsing will fail if:
// A srcinfo does not contain all required fields
// The same pkgname is specified more than once
// arch is missing
// pkgver is missing
// pkgrel is missing
// An architecture specific field is defined for an architecture that does not exist
// An unknown key is specified
// An empty value is specified
//
// Required fields are:
// pkgbase
// pkgname
// arch
// pkgrel
// pkgver
func Parse(data string) (*Srcinfo, error) {
return parse(data)
}
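// Illustrative usage sketch, not part of the vendored file: parsing a small
// srcinfo string, including an architecture dependent depends_x86_64 field.
// The sample data is made up; Parse, Version and the field names come from
// this package.
package main

import (
	"fmt"

	srcinfo "github.com/Morganamilo/go-srcinfo"
)

func main() {
	data := "pkgbase = example\n" +
		"\tpkgver = 1.0\n" +
		"\tpkgrel = 1\n" +
		"\tarch = x86_64\n" +
		"\tdepends_x86_64 = glibc\n" +
		"\npkgname = example\n"
	info, err := srcinfo.Parse(data)
	if err != nil {
		panic(err) // a *LineError carrying the offending line number
	}
	fmt.Println(info.Pkgbase, info.Version())                 // example 1.0-1
	fmt.Println(info.Depends[0].Arch, info.Depends[0].Value)  // x86_64 glibc
}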

View file

@ -0,0 +1,155 @@
package srcinfo
import (
"bytes"
)
func appendHeader(buffer *bytes.Buffer, key string, value string) {
if value == "" {
return
}
buffer.WriteString(key + " = " + value + "\n")
}
func appendValue(buffer *bytes.Buffer, key string, value string) {
if value == "" {
return
}
if value == EmptyOverride {
value = ""
}
buffer.WriteString("\t" + key + " = " + value + "\n")
}
func appendMultiValue(buffer *bytes.Buffer, key string, values []string) {
for _, value := range values {
if value == EmptyOverride {
value = ""
}
buffer.WriteString("\t" + key + " = " + value + "\n")
}
}
func appendMultiArchValue(buffer *bytes.Buffer, key string, values []ArchString) {
for _, value := range values {
if value.Value == EmptyOverride {
value.Value = ""
}
if value.Arch == "" {
buffer.WriteString("\t" + key + " = " + value.Value + "\n")
} else {
buffer.WriteString("\t" + key + "_" + value.Arch + " = " + value.Value + "\n")
}
}
}
// String generates a string that should be similar to the srcinfo data used to
// create this Srcinfo struct. Fields will be printed in order and with the same
// whitespace rules that `makepkg --printsrcinfo` uses.
//
// The order of each global field is as follows:
// pkgdesc
// pkgver
// pkgrel
// epoch
// url
// install
// changelog
// arch
// groups
// license
// checkdepends
// makedepends
// depends
// optdepends
// provides
// conflicts
// replaces
// noextract
// options
// backup
// source
// validpgpkeys
// md5sums
// sha1sums
// sha224sums
// sha256sums
// sha384sums
// sha512sums
//
// The order of each overwritten field is as follows:
// pkgdesc
// url
// install
// changelog
// arch
// groups
// license
// checkdepends
// depends
// optdepends
// provides
// conflicts
// replaces
// options
// backup
func (si *Srcinfo) String() string {
var buffer bytes.Buffer
appendHeader(&buffer, "pkgbase", si.Pkgbase)
appendValue(&buffer, "pkgdesc", si.Pkgdesc)
appendValue(&buffer, "pkgver", si.Pkgver)
appendValue(&buffer, "pkgrel", si.Pkgrel)
appendValue(&buffer, "epoch", si.Epoch)
appendValue(&buffer, "url", si.URL)
appendValue(&buffer, "install", si.Install)
appendValue(&buffer, "changelog", si.Changelog)
appendMultiValue(&buffer, "arch", si.Arch)
appendMultiValue(&buffer, "groups", si.Groups)
appendMultiValue(&buffer, "license", si.License)
appendMultiArchValue(&buffer, "checkdepends", si.CheckDepends)
appendMultiArchValue(&buffer, "makedepends", si.MakeDepends)
appendMultiArchValue(&buffer, "depends", si.Depends)
appendMultiArchValue(&buffer, "optdepends", si.OptDepends)
appendMultiArchValue(&buffer, "provides", si.Provides)
appendMultiArchValue(&buffer, "conflicts", si.Conflicts)
appendMultiArchValue(&buffer, "replaces", si.Replaces)
appendMultiValue(&buffer, "noextract", si.NoExtract)
appendMultiValue(&buffer, "options", si.Options)
appendMultiValue(&buffer, "backup", si.Backup)
appendMultiValue(&buffer, "validpgpkeys", si.ValidPGPKeys)
appendMultiArchValue(&buffer, "source", si.Source)
appendMultiArchValue(&buffer, "md5sums", si.MD5Sums)
appendMultiArchValue(&buffer, "sha1sums", si.SHA1Sums)
appendMultiArchValue(&buffer, "sha224sums", si.SHA224Sums)
appendMultiArchValue(&buffer, "sha256sums", si.SHA256Sums)
appendMultiArchValue(&buffer, "sha384sums", si.SHA384Sums)
appendMultiArchValue(&buffer, "sha512sums", si.SHA512Sums)
for n, pkg := range si.Packages {
appendHeader(&buffer, "\npkgname", si.Packages[n].Pkgname)
appendValue(&buffer, "pkgdesc", pkg.Pkgdesc)
appendValue(&buffer, "url", pkg.URL)
appendValue(&buffer, "install", pkg.Install)
appendValue(&buffer, "changelog", pkg.Changelog)
appendMultiValue(&buffer, "arch", pkg.Arch)
appendMultiValue(&buffer, "groups", pkg.Groups)
appendMultiValue(&buffer, "license", pkg.License)
appendMultiArchValue(&buffer, "depends", pkg.Depends)
appendMultiArchValue(&buffer, "optdepends", pkg.OptDepends)
appendMultiArchValue(&buffer, "provides", pkg.Provides)
appendMultiArchValue(&buffer, "conflicts", pkg.Conflicts)
appendMultiArchValue(&buffer, "replaces", pkg.Replaces)
appendMultiValue(&buffer, "options", pkg.Options)
appendMultiValue(&buffer, "backup", pkg.Backup)
}
return buffer.String()
}
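// Illustrative sketch, not part of the vendored file: a parse/format round
// trip, e.g. to compare against the output of `makepkg --printsrcinfo`.
// Assumes package main with "fmt" and
// `import srcinfo "github.com/Morganamilo/go-srcinfo"`; the .SRCINFO path is
// hypothetical.
func formatExample() error {
	si, err := srcinfo.ParseFile(".SRCINFO")
	if err != nil {
		return err
	}
	fmt.Print(si.String()) // fields re-emitted in the order documented above
	return nil
}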

215
vendor/github.com/Morganamilo/go-srcinfo/srcinfo.go generated vendored Normal file
View file

@ -0,0 +1,215 @@
// Package srcinfo is a parser for srcinfo files, typically generated by
// makepkg, part of the pacman package manager.
//
// Split packages and architecture dependent fields are fully supported.
//
// This package aims to parse srcinfos but not interpret them in any way.
// All values are fundamentally strings, other tools should be used for
// things such as dependency parsing, validity checking etc.
package srcinfo
import (
"fmt"
)
// ArchString describes string values that may be architecture dependent.
// For example, depends_x86_64.
// If Arch is an empty string then the field is not architecture dependent.
type ArchString struct {
Arch string // Architecture name
Value string // Value
}
// Package describes the fields of a pkgbuild that may be overridden in the
// package_<pkgname> function.
type Package struct {
Pkgname string
Pkgdesc string
Arch []string
URL string
License []string
Groups []string
Depends []ArchString
OptDepends []ArchString
Provides []ArchString
Conflicts []ArchString
Replaces []ArchString
Backup []string
Options []string
Install string
Changelog string
}
// PackageBase describes the fields of a pkgbuild that may not be overwritten
// in the package_<pkgname> function.
type PackageBase struct {
Pkgbase string
Pkgver string
Pkgrel string
Epoch string
Source []ArchString
ValidPGPKeys []string
NoExtract []string
MD5Sums []ArchString
SHA1Sums []ArchString
SHA224Sums []ArchString
SHA256Sums []ArchString
SHA384Sums []ArchString
SHA512Sums []ArchString
MakeDepends []ArchString
CheckDepends []ArchString
}
// Srcinfo represents a full srcinfo. All global fields are defined here while
// fields overwritten in the package_<pkgname> function are defined in the
// Packages field.
//
// Note: The Packages field only contains the values that each package
// overrides; global fields will be missing. A Package containing both global
// and overwritten fields can be generated using the SplitPackage function.
type Srcinfo struct {
PackageBase // Fields that only apply to the package base
Package // Fields that apply to the package globally
Packages []Package // Fields for each package this package base contains
}
// EmptyOverride is used to signal when a value has been overridden with an
// empty value. An empty override occurs when a value is defined in the pkgbuild but
// then overridden inside the package function to be empty.
//
// For example "pkgdesc=''" is an empty override on the pkgdesc which would
// lead to the line "pkgdesc=" in the srcinfo.
//
// This value is used internally to store empty overrides, mainly to avoid
// using string pointers. It is possible to check for empty overrides using
// the Packages slice in Srcinfo.
//
// During normal use with the SplitPackage function this value will be
// converted back to an empty string, or removed entirely for slice values.
// This means this value can be completely ignored unless you are
// explicitly looking for empty overrides.
const EmptyOverride = "\x00"
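// Illustrative fragment, not part of the vendored file: detecting an empty
// override on a split package. A `pkgdesc=''` inside a package_<pkgname>()
// function ends up in the matching Packages entry as EmptyOverride rather
// than as a missing field. Assumes package main with "fmt" and
// `import srcinfo "github.com/Morganamilo/go-srcinfo"`, plus a parsed si.
func emptyOverrideExample(si *srcinfo.Srcinfo) {
	for _, pkg := range si.Packages {
		if pkg.Pkgdesc == srcinfo.EmptyOverride {
			fmt.Println(pkg.Pkgname, "overrides pkgdesc with an empty value")
		}
	}
}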
// Version formats a version string from the epoch, pkgver and pkgrel of the
// srcinfo. In the format [epoch:]pkgver-pkgrel.
func (si *Srcinfo) Version() string {
if si.Epoch == "" {
return si.Pkgver + "-" + si.Pkgrel
}
return si.Epoch + ":" + si.Pkgver + "-" + si.Pkgrel
}
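// Illustrative fragment, not part of the vendored file: the documented
// [epoch:]pkgver-pkgrel formatting. Assumes package main with "fmt" and
// `import srcinfo "github.com/Morganamilo/go-srcinfo"`.
func versionExample() {
	si := &srcinfo.Srcinfo{}
	si.Pkgver, si.Pkgrel = "1.0", "3"
	fmt.Println(si.Version()) // 1.0-3
	si.Epoch = "2"
	fmt.Println(si.Version()) // 2:1.0-3
}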
// SplitPackages generates a slice of all packages that are part of this
// srcinfo. This is equivalent to calling SplitPackage on every pkgname.
func (si *Srcinfo) SplitPackages() []*Package {
pkgs := make([]*Package, 0, len(si.Packages))
for _, pkg := range si.Packages {
pkgs = append(pkgs, mergeSplitPackage(&si.Package, &pkg))
}
return pkgs
}
// SplitPackage generates a Package that contains all fields that the specified
// pkgname has, falling back on the global fields when they are not overridden
// for that package.
//
// Note: slice values are passed by reference; it is not recommended to modify
// this struct after it is returned.
func (si *Srcinfo) SplitPackage(pkgname string) (*Package, error) {
for n := range si.Packages {
if si.Packages[n].Pkgname == pkgname {
return mergeSplitPackage(&si.Package, &si.Packages[n]), nil
}
}
return nil, fmt.Errorf("Package \"%s\" is not part of the package base \"%s\"", pkgname, si.Pkgbase)
}
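// Illustrative fragment, not part of the vendored file: building per-package
// views with global fields filled in. Assumes package main with "fmt" and
// `import srcinfo "github.com/Morganamilo/go-srcinfo"`, plus a parsed si.
func splitExample(si *srcinfo.Srcinfo) error {
	for _, pkg := range si.SplitPackages() {
		fmt.Println(pkg.Pkgname, pkg.Pkgdesc)
	}
	pkg, err := si.SplitPackage(si.Packages[0].Pkgname)
	if err != nil {
		return err
	}
	fmt.Println(pkg.Pkgname, "has", len(pkg.Depends), "depends entries")
	return nil
}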
func mergeArchSlice(global, override []ArchString) []ArchString {
overridden := make(map[string]struct{})
merged := make([]ArchString, 0, len(override))
for _, v := range override {
overridden[v.Arch] = struct{}{}
if v.Value == EmptyOverride {
continue
}
merged = append(merged, v)
}
for _, v := range global {
if _, ok := overridden[v.Arch]; !ok {
merged = append(merged, v)
}
}
return merged
}
func mergeSplitPackage(base, split *Package) *Package {
pkg := &Package{}
*pkg = *base
pkg.Pkgname = split.Pkgname
if split.Pkgdesc != "" {
pkg.Pkgdesc = split.Pkgdesc
}
if len(split.Arch) != 0 {
pkg.Arch = split.Arch
}
if split.URL != "" {
pkg.URL = split.URL
}
if len(split.License) != 0 {
pkg.License = split.License
}
if len(split.Groups) != 0 {
pkg.Groups = split.Groups
}
if len(split.Depends) != 0 {
pkg.Depends = mergeArchSlice(pkg.Depends, split.Depends)
}
if len(split.OptDepends) != 0 {
pkg.OptDepends = mergeArchSlice(pkg.OptDepends, split.OptDepends)
}
if len(split.Provides) != 0 {
pkg.Provides = mergeArchSlice(pkg.Provides, split.Provides)
}
if len(split.Conflicts) != 0 {
pkg.Conflicts = mergeArchSlice(pkg.Conflicts, split.Conflicts)
}
if len(split.Replaces) != 0 {
pkg.Replaces = mergeArchSlice(pkg.Replaces, split.Replaces)
}
if len(split.Backup) != 0 {
pkg.Backup = split.Backup
}
if len(split.Options) != 0 {
pkg.Options = split.Options
}
if split.Changelog != "" {
pkg.Changelog = split.Changelog
}
if split.Install != "" {
pkg.Install = split.Install
}
return pkg
}

View file

@ -9,7 +9,7 @@
#include <stdarg.h>
#include <alpm.h>
void logCallback(uint16_t level, char *cstring);
void logCallback(alpm_loglevel_t level, char *cstring);
void questionCallback(alpm_question_t *question);
void go_alpm_log_cb(alpm_loglevel_t level, const char *fmt, va_list arg) {

View file

@ -9,7 +9,7 @@ package alpm
/*
#include <stdint.h>
#include <alpm.h>
void logCallback(uint16_t level, char *cstring);
void logCallback(alpm_loglevel_t level, char *cstring);
void go_alpm_log_cb(alpm_loglevel_t level, const char *fmt, va_list arg);
void go_alpm_set_logging(alpm_handle_t *handle);
void go_alpm_set_question(alpm_handle_t *handle);
@ -20,37 +20,37 @@ import (
"unsafe"
)
type logCallbackSig func(uint16, string)
type logCallbackSig func(LogLevel, string)
type questionCallbackSig func(QuestionAny)
var DefaultLogLevel = LogWarning
func DefaultLogCallback(lvl uint16, s string) {
func DefaultLogCallback(lvl LogLevel, s string) {
if lvl <= DefaultLogLevel {
print("go-alpm: ", s)
}
}
var log_callback logCallbackSig
var question_callback questionCallbackSig
var globalLogCallback logCallbackSig
var globalQuestionCallback questionCallbackSig
//export logCallback
func logCallback(level uint16, cstring *C.char) {
log_callback(level, C.GoString(cstring))
func logCallback(level C.alpm_loglevel_t, cstring *C.char) {
globalLogCallback(LogLevel(level), C.GoString(cstring))
}
//export questionCallback
func questionCallback(question *C.alpm_question_t) {
q := (*C.alpm_question_any_t)(unsafe.Pointer(question))
question_callback(QuestionAny{q})
globalQuestionCallback(QuestionAny{q})
}
func (h *Handle) SetLogCallback(cb logCallbackSig) {
log_callback = cb
globalLogCallback = cb
C.go_alpm_set_logging(h.ptr)
}
func (h *Handle) SetQuestionCallback(cb questionCallbackSig) {
question_callback = cb
globalQuestionCallback = cb
C.go_alpm_set_question(h.ptr)
}
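// Illustrative fragment, not part of the vendored file: wiring the package's
// default log callback into a handle and raising the log level. Assumes a
// *Handle from Init and `import alpm "github.com/jguer/go-alpm"`.
func loggingExample(h *alpm.Handle) {
	alpm.DefaultLogLevel = alpm.LogDebug
	h.SetLogCallback(alpm.DefaultLogCallback)
}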

View file

@ -1,401 +0,0 @@
// conf.go - Functions for pacman.conf parsing.
//
// Copyright (c) 2013 The go-alpm Authors
//
// MIT Licensed. See LICENSE for details.
package alpm
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"reflect"
"strconv"
"strings"
"syscall"
)
type PacmanOption uint
const (
ConfUseSyslog PacmanOption = 1 << iota
ConfColor
ConfTotalDownload
ConfCheckSpace
ConfVerbosePkgLists
ConfILoveCandy
)
var optionsMap = map[string]PacmanOption{
"UseSyslog": ConfUseSyslog,
"Color": ConfColor,
"TotalDownload": ConfTotalDownload,
"CheckSpace": ConfCheckSpace,
"VerbosePkgLists": ConfVerbosePkgLists,
"ILoveCandy": ConfILoveCandy,
}
// PacmanConfig is a type for holding pacman options parsed from pacman
// configuration data passed to ParseConfig.
type PacmanConfig struct {
RootDir string
DBPath string
CacheDir []string
HookDir []string
GPGDir string
LogFile string
HoldPkg []string
IgnorePkg []string
IgnoreGroup []string
Include []string
Architecture string
XferCommand string
NoUpgrade []string
NoExtract []string
CleanMethod []string
SigLevel SigLevel
LocalFileSigLevel SigLevel
RemoteFileSigLevel SigLevel
UseDelta float64
Options PacmanOption
Repos []RepoConfig
}
// RepoConfig is a type that stores the signature level of a repository
// specified in the pacman config file.
type RepoConfig struct {
Name string
SigLevel SigLevel
Servers []string
}
// Constants for pacman configuration parsing
const (
tokenSection = iota
tokenKey
tokenComment
)
type iniToken struct {
Type uint
Name string
Values []string
}
type confReader struct {
*bufio.Reader
Lineno uint
}
// newConfReader reads from the io.Reader if it is buffered and returns a
// confReader containing the number of bytes read and 0 for the first line. If
// r is not a buffered reader, a new buffered reader is created using r as its
// input and returned.
func newConfReader(r io.Reader) confReader {
if buf, ok := r.(*bufio.Reader); ok {
return confReader{buf, 0}
}
buf := bufio.NewReader(r)
return confReader{buf, 0}
}
func (rdr *confReader) ParseLine() (tok iniToken, err error) {
line, overflow, err := rdr.ReadLine()
switch {
case err != nil:
return
case overflow:
err = fmt.Errorf("line %d too long", rdr.Lineno)
return
}
rdr.Lineno++
line = bytes.TrimSpace(line)
if len(line) == 0 {
tok.Type = tokenComment
return
}
switch line[0] {
case '#':
tok.Type = tokenComment
return
case '[':
closing := bytes.IndexByte(line, ']')
if closing < 0 {
err = fmt.Errorf("missing ']' is section name at line %d", rdr.Lineno)
return
}
tok.Name = string(line[1:closing])
if closing+1 < len(line) {
err = fmt.Errorf("trailing characters %q after section name %s",
line[closing+1:], tok.Name)
return
}
return
default:
tok.Type = tokenKey
if idx := bytes.IndexByte(line, '='); idx >= 0 {
optname := bytes.TrimSpace(line[:idx])
values := bytes.Split(line[idx+1:], []byte{' '})
tok.Name = string(optname)
tok.Values = make([]string, 0, len(values))
for _, word := range values {
word = bytes.TrimSpace(word)
if len(word) > 0 {
tok.Values = append(tok.Values, string(word))
}
}
} else {
// boolean option
tok.Name = string(line)
tok.Values = nil
}
return
}
}
func ParseConfig(r io.Reader) (conf PacmanConfig, err error) {
rdr := newConfReader(r)
rdrStack := []confReader{rdr}
conf.SetDefaults()
confReflect := reflect.ValueOf(&conf).Elem()
var currentSection string
var curRepo *RepoConfig
lineloop:
for {
line, err := rdr.ParseLine()
// fmt.Printf("%+v\n", line)
switch err {
case io.EOF:
// pop reader stack.
l := len(rdrStack)
if l == 1 {
break lineloop
}
rdr = rdrStack[l-2]
rdrStack = rdrStack[:l-1]
default:
break lineloop
case nil:
// Ok.
}
switch line.Type {
case tokenComment:
case tokenSection:
currentSection = line.Name
if currentSection != "options" {
conf.Repos = append(conf.Repos, RepoConfig{})
curRepo = &conf.Repos[len(conf.Repos)-1]
curRepo.Name = line.Name
}
case tokenKey:
switch line.Name {
case "SigLevel":
// TODO: implement SigLevel parsing.
continue lineloop
case "Server":
curRepo.Servers = append(curRepo.Servers, line.Values...)
continue lineloop
case "Include":
conf.Include = append(conf.Include, line.Values[0])
f, err := os.Open(line.Values[0])
if err != nil {
err = fmt.Errorf("error while processing Include directive at line %d: %s",
rdr.Lineno, err)
break lineloop
}
rdr = newConfReader(f)
rdrStack = append(rdrStack, rdr)
continue lineloop
case "UseDelta":
if len(line.Values) > 0 {
deltaRatio, err := strconv.ParseFloat(line.Values[0], 64)
if err != nil {
break lineloop
}
conf.UseDelta = deltaRatio
}
continue lineloop
}
if currentSection != "options" {
err = fmt.Errorf("option %s outside of [options] section, at line %d",
line.Name, rdr.Lineno)
break lineloop
}
// main options.
if opt, ok := optionsMap[line.Name]; ok {
// boolean option.
conf.Options |= opt
} else {
// key-value option.
fld := confReflect.FieldByName(line.Name)
if !fld.IsValid() || !fld.CanAddr() {
_ = fmt.Errorf("unknown option at line %d: %s", rdr.Lineno, line.Name)
continue
}
switch fieldP := fld.Addr().Interface().(type) {
case *string:
// single valued option.
*fieldP = strings.Join(line.Values, " ")
case *[]string:
//many valued option.
*fieldP = append(*fieldP, line.Values...)
}
}
}
}
if len(conf.CleanMethod) == 0 {
conf.CleanMethod = []string{"KeepInstalled"}
}
if len(conf.CacheDir) == 0 {
conf.CacheDir = []string{"/var/cache/pacman/pkg/"} //should only be set if the config does not specify this
}
return conf, err
}
func (conf *PacmanConfig) SetDefaults() {
conf.RootDir = "/"
conf.DBPath = "/var/lib/pacman"
conf.DBPath = "/var/lib/pacman/"
conf.HookDir = []string{"/etc/pacman.d/hooks/"} //should be added to whatever the config states
conf.GPGDir = "/etc/pacman.d/gnupg/"
conf.LogFile = "/var/log/pacman.log"
conf.UseDelta = 0.7
conf.SigLevel = SigPackage | SigPackageOptional | SigDatabase | SigDatabaseOptional
conf.LocalFileSigLevel = SigUseDefault
conf.RemoteFileSigLevel = SigUseDefault
}
func getArch() (string, error) {
var uname syscall.Utsname
err := syscall.Uname(&uname)
if err != nil {
return "", err
}
var arch [65]byte
for i, c := range uname.Machine {
if c == 0 {
return string(arch[:i]), nil
}
arch[i] = byte(c)
}
return string(arch[:]), nil
}
func (conf *PacmanConfig) CreateHandle() (*Handle, error) {
h, err := Init(conf.RootDir, conf.DBPath)
if err != nil {
return nil, err
}
if conf.Architecture == "auto" {
conf.Architecture, err = getArch()
if err != nil {
return nil, fmt.Errorf("architecture is 'auto' but couldn't uname()")
}
}
for _, repoconf := range conf.Repos {
// TODO: set SigLevel
db, err := h.RegisterSyncDb(repoconf.Name, 0)
if err == nil {
for i, addr := range repoconf.Servers {
addr = strings.Replace(addr, "$repo", repoconf.Name, -1)
addr = strings.Replace(addr, "$arch", conf.Architecture, -1)
repoconf.Servers[i] = addr
}
db.SetServers(repoconf.Servers)
}
}
err = h.SetCacheDirs(conf.CacheDir...)
if err != nil {
return nil, err
}
// add hook directories 1-by-1 to avoid overwriting the system directory
for _, dir := range conf.HookDir {
err = h.AddHookDir(dir)
if err != nil {
return nil, err
}
}
err = h.SetGPGDir(conf.GPGDir)
if err != nil {
return nil, err
}
err = h.SetLogFile(conf.LogFile)
if err != nil {
return nil, err
}
err = h.SetIgnorePkgs(conf.IgnorePkg...)
if err != nil {
return nil, err
}
err = h.SetIgnoreGroups(conf.IgnoreGroup...)
if err != nil {
return nil, err
}
err = h.SetArch(conf.Architecture)
if err != nil {
return nil, err
}
h.SetNoUpgrades(conf.NoUpgrade...)
if err != nil {
return nil, err
}
h.SetNoExtracts(conf.NoExtract...)
if err != nil {
return nil, err
}
err = h.SetDefaultSigLevel(conf.SigLevel)
if err != nil {
return nil, err
}
err = h.SetLocalFileSigLevel(conf.LocalFileSigLevel)
if err != nil {
return nil, err
}
err = h.SetRemoteFileSigLevel(conf.RemoteFileSigLevel)
if err != nil {
return nil, err
}
err = h.SetDeltaRatio(conf.UseDelta)
if err != nil {
return nil, err
}
err = h.SetUseSyslog(conf.Options&ConfUseSyslog > 0)
if err != nil {
return nil, err
}
err = h.SetCheckSpace(conf.Options&ConfCheckSpace > 0)
if err != nil {
return nil, err
}
return h, nil
}

View file

@ -8,6 +8,7 @@ package alpm
/*
#include <alpm.h>
#include <alpm_list.h>
*/
import "C"
@ -117,6 +118,11 @@ func (db Db) SetServers(servers []string) {
}
}
// SetUsage sets the Usage of the database
func (db Db) SetUsage(usage Usage) {
C.alpm_db_set_usage(db.ptr, C.int(usage))
}
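// Illustrative fragment, not part of the vendored file: limiting what a
// registered sync database may be used for. UsageAll is one of the Usage
// constants added elsewhere in this diff; assumes a *Handle from Init and
// `import alpm "github.com/jguer/go-alpm"`.
func usageExample(h *alpm.Handle) error {
	db, err := h.RegisterSyncDb("core", 0)
	if err != nil {
		return err
	}
	db.SetUsage(alpm.UsageAll) // allow sync, search, install and upgrade
	return nil
}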
// PkgByName searches a package in db.
func (db Db) PkgByName(name string) (*Package, error) {
cName := C.CString(name)
@ -151,3 +157,15 @@ func (db Db) PkgCache() PackageList {
pkgcache := (*list)(unsafe.Pointer(C.alpm_db_get_pkgcache(db.ptr)))
return PackageList{pkgcache, db.handle}
}
func (db Db) Search(targets []string) PackageList {
var needles *C.alpm_list_t
for _, str := range targets {
needles = C.alpm_list_add(needles, unsafe.Pointer(C.CString(str)))
}
pkglist := (*list)(unsafe.Pointer(C.alpm_db_search(db.ptr, needles)))
C.alpm_list_free(needles)
return PackageList{pkglist, db.handle}
}
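// Illustrative sketch, not part of the vendored file: searching a registered
// sync database. Init, Release, RegisterSyncDb and Search are defined in this
// package; PackageList.ForEach and Package.Name are assumed from the rest of
// the library. Assumes package main with "fmt" and
// `import alpm "github.com/jguer/go-alpm"`.
func searchExample() error {
	h, err := alpm.Init("/", "/var/lib/pacman")
	if err != nil {
		return err
	}
	defer h.Release()
	db, err := h.RegisterSyncDb("core", 0)
	if err != nil {
		return err
	}
	return db.Search([]string{"linux"}).ForEach(func(pkg alpm.Package) error {
		fmt.Println(pkg.Name())
		return nil
	})
}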

View file

@ -89,9 +89,11 @@ const (
SigStatusKeyDisabled
)
type LogLevel uint16
// Logging levels.
const (
LogError uint16 = 1 << iota
LogError LogLevel = 1 << iota
LogWarning
LogDebug
LogFunction
@ -118,3 +120,38 @@ const (
ValidationSignature
ValidationUnkown Validation = 0
)
type Usage int
const (
UsageSync Usage = 1 << iota
UsageSearch
UsageInstall
UsageUpgrade
UsageAll = (1 << 4) - 1
)
type TransFlag int
const (
TransFlagNoDeps TransFlag = 1 << iota
TransFlagForce
TransFlagNoSave
TransFlagNoDepVersion
TransFlagCascade
TransFlagRecurse
// 7 is missing
_
TransFlagDbOnly
TransFlagAllDeps
TransFlagDownloadOnly
TransFlagNoScriptlets
// 12 is missing
_
TransFlagNoConflicts
TransFlagNeeded
TransFlagAllExplicit
TransFlagUnneeded
TransFlagRecurseAll
TransFlagNoLock
)

View file

@ -20,27 +20,29 @@ import (
"unsafe"
)
// Handle contains the pointer to the alpm handle
type Handle struct {
ptr *C.alpm_handle_t
}
// Initialize
// Init initializes alpm handle
func Init(root, dbpath string) (*Handle, error) {
c_root := C.CString(root)
c_dbpath := C.CString(dbpath)
var c_err C.alpm_errno_t
h := C.alpm_initialize(c_root, c_dbpath, &c_err)
cRoot := C.CString(root)
cDBPath := C.CString(dbpath)
var cErr C.alpm_errno_t
h := C.alpm_initialize(cRoot, cDBPath, &cErr)
defer C.free(unsafe.Pointer(c_root))
defer C.free(unsafe.Pointer(c_dbpath))
defer C.free(unsafe.Pointer(cRoot))
defer C.free(unsafe.Pointer(cDBPath))
if c_err != 0 {
return nil, Error(c_err)
if cErr != 0 {
return nil, Error(cErr)
}
return &Handle{h}, nil
}
// Release releases the alpm handle
func (h *Handle) Release() error {
if er := C.alpm_release(h.ptr); er != 0 {
return Error(er)
@ -52,9 +54,9 @@ func (h *Handle) Release() error {
// LastError gets the last pm_error
func (h Handle) LastError() error {
if h.ptr != nil {
c_err := C.alpm_errno(h.ptr)
if c_err != 0 {
return Error(c_err)
cErr := C.alpm_errno(h.ptr)
if cErr != 0 {
return Error(cErr)
}
}
return nil
@ -76,12 +78,12 @@ func (h Handle) optionGetList(f func(*C.alpm_handle_t) *C.alpm_list_t) (StringLi
}
func (h Handle) optionSetList(hookDirs []string, f func(*C.alpm_handle_t, *C.alpm_list_t) C.int) error {
var list *C.alpm_list_t = nil
var list *C.alpm_list_t
for _, dir := range hookDirs {
c_dir := C.CString(dir)
list = C.alpm_list_add(list, unsafe.Pointer(c_dir))
defer C.free(unsafe.Pointer(c_dir))
cDir := C.CString(dir)
list = C.alpm_list_add(list, unsafe.Pointer(cDir))
defer C.free(unsafe.Pointer(cDir))
}
ok := f(h.ptr, list)
@ -92,9 +94,9 @@ func (h Handle) optionSetList(hookDirs []string, f func(*C.alpm_handle_t, *C.alp
}
func (h Handle) optionAddList(hookDir string, f func(*C.alpm_handle_t, *C.char) C.int) error {
c_hookdir := C.CString(hookDir)
defer C.free(unsafe.Pointer(c_hookdir))
ok := f(h.ptr, c_hookdir)
cHookDir := C.CString(hookDir)
defer C.free(unsafe.Pointer(cHookDir))
ok := f(h.ptr, cHookDir)
if ok < 0 {
return h.LastError()
}
@ -102,9 +104,9 @@ func (h Handle) optionAddList(hookDir string, f func(*C.alpm_handle_t, *C.char)
}
func (h Handle) optionRemoveList(dir string, f func(*C.alpm_handle_t, *C.char) C.int) (bool, error) {
c_dir := C.CString(dir)
ok := f(h.ptr, c_dir)
defer C.free(unsafe.Pointer(c_dir))
cDir := C.CString(dir)
ok := f(h.ptr, cDir)
defer C.free(unsafe.Pointer(cDir))
if ok < 0 {
return ok == 1, h.LastError()
}
@ -112,9 +114,9 @@ func (h Handle) optionRemoveList(dir string, f func(*C.alpm_handle_t, *C.char) C
}
func (h Handle) optionMatchList(dir string, f func(*C.alpm_handle_t, *C.char) C.int) (bool, error) {
c_dir := C.CString(dir)
ok := f(h.ptr, c_dir)
defer C.free(unsafe.Pointer(c_dir))
cDir := C.CString(dir)
ok := f(h.ptr, cDir)
defer C.free(unsafe.Pointer(cDir))
if ok == 0 {
return true, nil
} else if ok == C.FNM_NOMATCH {
@ -125,9 +127,9 @@ func (h Handle) optionMatchList(dir string, f func(*C.alpm_handle_t, *C.char) C.
//helper functions for *char based getters and setters
func (h Handle) optionGetStr(f func(*C.alpm_handle_t) *C.char) (string, error) {
c_str := f(h.ptr)
str := C.GoString(c_str)
if c_str == nil {
cStr := f(h.ptr)
str := C.GoString(cStr)
if cStr == nil {
return str, h.LastError()
}
@ -135,9 +137,9 @@ func (h Handle) optionGetStr(f func(*C.alpm_handle_t) *C.char) (string, error) {
}
func (h Handle) optionSetStr(str string, f func(*C.alpm_handle_t, *C.char) C.int) error {
c_str := C.CString(str)
defer C.free(unsafe.Pointer(c_str))
ok := f(h.ptr, c_str)
cStr := C.CString(str)
defer C.free(unsafe.Pointer(cStr))
ok := f(h.ptr, cStr)
if ok < 0 {
h.LastError()
@ -253,12 +255,12 @@ func (h Handle) UseSyslog() (bool, error) {
}
func (h Handle) SetUseSyslog(value bool) error {
var int_value C.int = 0
var intValue C.int
if value {
int_value = 1
intValue = 1
}
ok := C.alpm_option_set_usesyslog(h.ptr, int_value)
ok := C.alpm_option_set_usesyslog(h.ptr, intValue)
if ok < 0 {
return h.LastError()
}
@ -395,10 +397,10 @@ func (h Handle) AssumeInstalled() (DependList, error) {
}
func (h Handle) AddAssumeInstalled(dep Depend) error {
c_dep := convertCDepend(dep)
defer freeCDepend(c_dep)
cDep := convertCDepend(dep)
defer freeCDepend(cDep)
ok := C.alpm_option_add_assumeinstalled(h.ptr, c_dep)
ok := C.alpm_option_add_assumeinstalled(h.ptr, cDep)
if ok < 0 {
return h.LastError()
}
@ -417,12 +419,12 @@ func (h Handle) SetAssumeInstalled(deps ...Depend) error {
//although for the sake of completeness it would be nice to have this
//working
panic("This function (SetAssumeInstalled) does not work properly, please do not use. See source code for more details")
var list *C.alpm_list_t = nil
var list *C.alpm_list_t
for _, dep := range deps {
c_dep := convertCDepend(dep)
defer freeCDepend(c_dep)
list = C.alpm_list_add(list, unsafe.Pointer(c_dep))
cDep := convertCDepend(dep)
defer freeCDepend(cDep)
list = C.alpm_list_add(list, unsafe.Pointer(cDep))
}
ok := C.alpm_option_set_assumeinstalled(h.ptr, list)
@ -448,10 +450,10 @@ func (h Handle) RemoveAssumeInstalled(dep Depend) (bool, error) {
//although for the sake of completeness it would be nice to have this
//working
panic("This function (RemoveAssumeInstalled) does not work properly, please do not use. See source code for more details")
c_dep := convertCDepend(dep)
defer freeCDepend(c_dep)
cDep := convertCDepend(dep)
defer freeCDepend(cDep)
ok := C.alpm_option_remove_assumeinstalled(h.ptr, c_dep)
ok := C.alpm_option_remove_assumeinstalled(h.ptr, cDep)
if ok < 0 {
return ok == 1, h.LastError()
}
@ -465,8 +467,8 @@ func (h Handle) Arch() (string, error) {
}
func (h Handle) SetArch(str string) error {
return h.optionSetStr(str, func(handle *C.alpm_handle_t, c_str *C.char) C.int {
return C.alpm_option_set_arch(handle, c_str)
return h.optionSetStr(str, func(handle *C.alpm_handle_t, cStr *C.char) C.int {
return C.alpm_option_set_arch(handle, cStr)
})
}
@ -500,12 +502,12 @@ func (h Handle) CheckSpace() (bool, error) {
}
func (h Handle) SetCheckSpace(value bool) error {
var int_value C.int = 0
var cValue C.int
if value {
int_value = 1
cValue = 1
}
ok := C.alpm_option_set_checkspace(h.ptr, int_value)
ok := C.alpm_option_set_checkspace(h.ptr, cValue)
if ok < 0 {
return h.LastError()
}
@ -519,8 +521,8 @@ func (h Handle) DBExt() (string, error) {
}
func (h Handle) SetDBExt(str string) error {
return h.optionSetStr(str, func(handle *C.alpm_handle_t, c_str *C.char) C.int {
return C.alpm_option_set_dbext(handle, c_str)
return h.optionSetStr(str, func(handle *C.alpm_handle_t, cStr *C.char) C.int {
return C.alpm_option_set_dbext(handle, cStr)
})
}

View file

@ -272,8 +272,8 @@ func (pkg Package) ComputeRequiredBy() []string {
for i := (*list)(unsafe.Pointer(result)); i != nil; i = i.Next {
defer C.free(unsafe.Pointer(i))
if i.Data != nil {
defer C.free(unsafe.Pointer(i.Data))
name := C.GoString((*C.char)(unsafe.Pointer(i.Data)))
defer C.free(i.Data)
name := C.GoString((*C.char)(i.Data))
requiredby = append(requiredby, name)
}
}
@ -287,8 +287,8 @@ func (pkg Package) ComputeOptionalFor() []string {
for i := (*list)(unsafe.Pointer(result)); i != nil; i = i.Next {
defer C.free(unsafe.Pointer(i))
if i.Data != nil {
defer C.free(unsafe.Pointer(i.Data))
name := C.GoString((*C.char)(unsafe.Pointer(i.Data)))
defer C.free(i.Data)
name := C.GoString((*C.char)(i.Data))
optionalfor = append(optionalfor, name)
}
}

27
vendor/github.com/jguer/go-alpm/sync.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
// db.go - Functions for database handling.
//
// Copyright (c) 2013 The go-alpm Authors
//
// MIT Licensed. See LICENSE for details.
package alpm
/*
#include <alpm.h>
*/
import "C"
func (h *Handle) SyncSysupgrade(enableDowngrade bool) error {
intEnableDowngrade := C.int(0)
if enableDowngrade {
intEnableDowngrade = C.int(1)
}
ret := C.alpm_sync_sysupgrade(h.ptr, intEnableDowngrade)
if ret != 0 {
return h.LastError()
}
return nil
}

54
vendor/github.com/jguer/go-alpm/trans.go generated vendored Normal file
View file

@ -0,0 +1,54 @@
// db.go - Functions for database handling.
//
// Copyright (c) 2013 The go-alpm Authors
//
// MIT Licensed. See LICENSE for details.
package alpm
/*
#include <alpm.h>
*/
import "C"
import (
"unsafe"
)
func (h *Handle) TransInit(flags TransFlag) error {
ret := C.alpm_trans_init(h.ptr, C.int(flags))
if ret != 0 {
return h.LastError()
}
return nil
}
func (h *Handle) TransRelease() error {
ret := C.alpm_trans_release(h.ptr)
if ret != 0 {
return h.LastError()
}
return nil
}
func (h *Handle) TransGetAdd() PackageList {
pkgs := C.alpm_trans_get_add(h.ptr)
return PackageList{(*list)(unsafe.Pointer(pkgs)), *h}
}
func (h *Handle) TransGetRemove() PackageList {
pkgs := C.alpm_trans_get_remove(h.ptr)
return PackageList{(*list)(unsafe.Pointer(pkgs)), *h}
}
func (h *Handle) TransGetFalgs() (TransFlag, error) {
flags := C.alpm_trans_get_flags(h.ptr)
if flags == -1 {
return -1, h.LastError()
}
return TransFlag(flags), nil
}
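// Illustrative sketch, not part of the vendored file: computing the packages
// a full system upgrade would pull in, combining the transaction helpers above
// with SyncSysupgrade from sync.go. Assumes sync databases have already been
// registered on h; PackageList.ForEach and Package.Name are assumed from the
// rest of the library, along with "fmt" and
// `import alpm "github.com/jguer/go-alpm"`.
func upgradeListExample(h *alpm.Handle) error {
	if err := h.TransInit(alpm.TransFlagNoLock); err != nil {
		return err
	}
	defer h.TransRelease()
	if err := h.SyncSysupgrade(false); err != nil { // false: no downgrades
		return err
	}
	return h.TransGetAdd().ForEach(func(pkg alpm.Package) error {
		fmt.Println("upgrade:", pkg.Name())
		return nil
	})
}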

View file

@ -16,7 +16,7 @@ import (
"unsafe"
)
// Description of a dependency.
// Depend provides a description of a dependency.
type Depend struct {
Name string
Version string
@ -36,19 +36,19 @@ func convertDepend(dep *C.alpm_depend_t) Depend {
}
func convertCDepend(dep Depend) *C.alpm_depend_t {
c_name := C.CString(dep.Name)
c_version := C.CString(dep.Version)
c_desc := C.CString(dep.Description)
cName := C.CString(dep.Name)
cVersion := C.CString(dep.Version)
cDesc := C.CString(dep.Description)
c_dep := C.alpm_depend_t{
name: c_name,
version: c_version,
desc: c_desc,
cDep := C.alpm_depend_t{
name: cName,
version: cVersion,
desc: cDesc,
name_hash: C.ulong(dep.NameHash),
mod: C.alpm_depmod_t(dep.Mod),
}
return &c_dep
return &cDep
}
func freeCDepend(dep *C.alpm_depend_t) {
@ -61,7 +61,7 @@ func (dep Depend) String() string {
return dep.Name + dep.Mod.String() + dep.Version
}
// Description of package files.
// File provides a description of package files.
type File struct {
Name string
Size int64
@ -72,18 +72,18 @@ func convertFilelist(files *C.alpm_filelist_t) []File {
size := int(files.count)
items := make([]File, size)
raw_items := reflect.SliceHeader{
rawItems := reflect.SliceHeader{
Len: size,
Cap: size,
Data: uintptr(unsafe.Pointer(files.files))}
c_files := *(*[]C.alpm_file_t)(unsafe.Pointer(&raw_items))
cFiles := *(*[]C.alpm_file_t)(unsafe.Pointer(&rawItems))
for i := 0; i < size; i++ {
items[i] = File{
Name: C.GoString(c_files[i].name),
Size: int64(c_files[i].size),
Mode: uint32(c_files[i].mode)}
Name: C.GoString(cFiles[i].name),
Size: int64(cFiles[i].size),
Mode: uint32(cFiles[i].mode)}
}
return items
}
@ -192,6 +192,14 @@ func (question QuestionAny) QuestionSelectProvider() (QuestionSelectProvider, er
return QuestionSelectProvider{}, fmt.Errorf("Can not convert to QuestionInstallIgnorepkg")
}
func (question QuestionAny) QuestionReplace() (QuestionReplace, error) {
if question.Type() == QuestionTypeReplacePkg {
return *(*QuestionReplace)(unsafe.Pointer(&question)), nil
}
return QuestionReplace{}, fmt.Errorf("Can not convert to QuestionReplace")
}
func (question QuestionInstallIgnorepkg) SetInstall(install bool) {
if install {
question.ptr.install = 1

View file

@ -2,7 +2,7 @@ package aur
import (
"encoding/json"
"fmt"
"errors"
"net/http"
"net/url"
)
@ -41,6 +41,7 @@ type Pkg struct {
Provides []string `json:"Provides"`
Replaces []string `json:"Replaces"`
OptDepends []string `json:"OptDepends"`
Groups []string `json:"Groups"`
License []string `json:"License"`
Keywords []string `json:"Keywords"`
}
@ -61,7 +62,7 @@ func get(values url.Values) ([]Pkg, error) {
}
if len(result.Error) > 0 {
return nil, fmt.Errorf(result.Error)
return nil, errors.New(result.Error)
}
return result.Results, nil

View file

@ -1,283 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// based on the lexer from: src/pkg/text/template/parse/lex.go (golang source)
package pkgbuild
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
// pos is a position in input being scanned
type pos int
type item struct {
typ itemType
pos pos
val string
}
func (i item) String() string {
switch {
case i.typ == itemEOF:
return "EOF"
case i.typ == itemError:
return i.val
case len(i.val) > 10:
return fmt.Sprintf("%.10q...", i.val)
}
return fmt.Sprintf("%q", i.val)
}
type itemType int
const (
itemError itemType = iota
itemEOF
itemVariable
itemValue
itemEndSplit
// PKGBUILD variables
itemPkgname // pkgname variable
itemPkgver // pkgver variable
itemPkgrel // pkgrel variable
itemPkgdir // pkgdir variable
itemEpoch // epoch variable
itemPkgbase // pkgbase variable
itemPkgdesc // pkgdesc variable
itemArch // arch variable
itemURL // url variable
itemLicense // license variable
itemGroups // groups variable
itemDepends // depends variable
itemOptdepends // optdepends variable
itemMakedepends // makedepends variable
itemCheckdepends // checkdepends variable
itemProvides // provides variable
itemConflicts // conflicts variable
itemReplaces // replaces variable
itemBackup // backup variable
itemOptions // options variable
itemInstall // install variable
itemChangelog // changelog variable
itemSource // source variable
itemNoextract // noextract variable
itemMd5sums // md5sums variable
itemSha1sums // sha1sums variable
itemSha224sums // sha224sums variable
itemSha256sums // sha256sums variable
itemSha384sums // sha384sums variable
itemSha512sums // sha512sums variable
itemValidpgpkeys // validpgpkeys variable
)
// PKGBUILD variables
var variables = map[string]itemType{
"pkgname": itemPkgname,
"pkgver": itemPkgver,
"pkgrel": itemPkgrel,
"pkgdir": itemPkgdir,
"epoch": itemEpoch,
"pkgbase": itemPkgbase,
"pkgdesc": itemPkgdesc,
"arch": itemArch,
"url": itemURL,
"license": itemLicense,
"groups": itemGroups,
"depends": itemDepends,
"optdepends": itemOptdepends,
"makedepends": itemMakedepends,
"checkdepends": itemCheckdepends,
"provides": itemProvides,
"conflicts": itemConflicts,
"replaces": itemReplaces,
"backup": itemBackup,
"options": itemOptions,
"install": itemInstall,
"changelog": itemChangelog,
"source": itemSource,
"noextract": itemNoextract,
"md5sums": itemMd5sums,
"sha1sums": itemSha1sums,
"sha224sums": itemSha224sums,
"sha256sums": itemSha256sums,
"sha384sums": itemSha384sums,
"sha512sums": itemSha512sums,
"validpgpkeys": itemValidpgpkeys,
}
const eof = -1
// stateFn represents the state of the scanner as a function that returns the next state
type stateFn func(*lexer) stateFn
// lexer holds the state of the scanner
type lexer struct {
input string
state stateFn
pos pos
start pos
width pos
lastPos pos
items chan item // channel of scanned items
}
// next returns the next rune in the input
func (l *lexer) next() rune {
if int(l.pos) >= len(l.input) {
l.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
l.width = pos(w)
l.pos += l.width
return r
}
// peek returns but does not consume the next rune in the input
func (l *lexer) peek() rune {
r := l.next()
l.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next
func (l *lexer) backup() {
l.pos -= l.width
}
// emit passes an item back to the client
func (l *lexer) emit(t itemType) {
l.items <- item{t, l.start, l.input[l.start:l.pos]}
l.start = l.pos
}
// ignore skips over the pending input before this point
func (l *lexer) ignore() {
l.start = l.pos
}
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
return nil
}
// nextItem returns the next item from the input.
func (l *lexer) nextItem() item {
item := <-l.items
l.lastPos = item.pos
return item
}
func lex(input string) *lexer {
l := &lexer{
input: input,
items: make(chan item),
}
go l.run()
return l
}
func (l *lexer) run() {
for l.state = lexEnv; l.state != nil; {
l.state = l.state(l)
}
}
func lexEnv(l *lexer) stateFn {
var r rune
for {
switch r = l.next(); {
case r == eof:
l.emit(itemEOF)
return nil
case isAlphaNumericUnderscore(r):
return lexVariable
case r == '\n':
buffer := l.input[l.start:l.pos]
if buffer == "\n" {
if l.peek() == '\n' {
l.next()
l.emit(itemEndSplit)
}
l.ignore()
}
case r == '\t':
l.ignore()
case r == ' ':
l.ignore()
case r == '#':
return lexComment
default:
l.errorf("unable to parse character: %c", r)
}
}
}
func lexComment(l *lexer) stateFn {
for {
switch l.next() {
case '\n':
l.ignore()
return lexEnv
case eof:
l.emit(itemEOF)
return nil
}
}
}
func lexVariable(l *lexer) stateFn {
for {
switch r := l.next(); {
case isAlphaNumericUnderscore(r):
// absorb
case r == ' ' && l.peek() == '=':
l.backup()
variable := l.input[l.start:l.pos]
// strip arch from source_arch like constructs
witharch := strings.SplitN(variable, "_", 2)
if len(witharch) == 2 {
variable = witharch[0]
}
if _, ok := variables[variable]; ok {
l.emit(variables[variable])
// TODO to cut off ' = '
l.next()
l.next()
l.next()
l.ignore()
return lexValue
}
return l.errorf("invalid variable: %s", variable)
default:
pattern := l.input[l.start:l.pos]
return l.errorf("invalid pattern: %s", pattern)
}
}
}
func lexValue(l *lexer) stateFn {
for {
switch l.next() {
case '\n':
l.backup()
l.emit(itemValue)
return lexEnv
}
}
}
// isAlphaNumericUnderscore reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumericUnderscore(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}

View file

@ -1,625 +0,0 @@
package pkgbuild
import (
"bytes"
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// Dependency describes a dependency with min and max version, if any.
type Dependency struct {
Name string // dependency name
MinVer *CompleteVersion // min version
sgt bool // defines if min version is strictly greater than
MaxVer *CompleteVersion // max version
slt bool // defines if max version is strictly less than
}
// Restrict merges two dependencies together into a new dependency where the
// conditions of both a and b are met
func (a *Dependency) Restrict(b *Dependency) *Dependency {
newDep := &Dependency{
Name: a.Name,
}
if a.MaxVer != nil || b.MaxVer != nil {
newDep.MaxVer = &CompleteVersion{}
if a.MaxVer == nil {
*newDep.MaxVer = *b.MaxVer
newDep.slt = b.slt
} else if b.MaxVer == nil {
*newDep.MaxVer = *a.MaxVer
newDep.slt = a.slt
} else {
cmpMax := a.MaxVer.cmp(b.MaxVer)
if cmpMax >= 1 {
*newDep.MaxVer = *b.MaxVer
newDep.slt = b.slt
} else if cmpMax <= -1 {
*newDep.MaxVer = *a.MaxVer
newDep.slt = a.slt
} else if cmpMax == 0 {
if len(a.MaxVer.Pkgrel) > len(b.MaxVer.Pkgrel) {
*newDep.MaxVer = *a.MaxVer
} else {
*newDep.MaxVer = *b.MaxVer
}
if a.slt != b.slt {
newDep.slt = true
} else {
newDep.slt = a.slt
}
}
}
}
if a.MinVer != nil || b.MinVer != nil {
newDep.MinVer = &CompleteVersion{}
if a.MinVer == nil {
*newDep.MinVer = *b.MinVer
newDep.sgt = b.slt
} else if b.MinVer == nil {
*newDep.MinVer = *a.MinVer
newDep.sgt = a.sgt
} else {
cmpMin := a.MinVer.cmp(b.MinVer)
if cmpMin >= 1 {
*newDep.MinVer = *a.MinVer
newDep.sgt = a.sgt
} else if cmpMin <= -1 {
*newDep.MinVer = *b.MinVer
newDep.sgt = b.sgt
} else if cmpMin == 0 {
if len(a.MinVer.Pkgrel) > len(b.MinVer.Pkgrel) {
*newDep.MinVer = *a.MinVer
} else {
*newDep.MinVer = *b.MinVer
}
if a.sgt != b.sgt {
newDep.sgt = true
} else {
newDep.sgt = a.sgt
}
}
}
}
return newDep
}
func (dep *Dependency) String() string {
str := ""
greaterThan := ">"
lessThan := "<"
if !dep.sgt {
greaterThan = ">="
}
if !dep.slt {
lessThan = "<="
}
if dep.MinVer != nil {
str += dep.Name + greaterThan + dep.MinVer.String()
if dep.MaxVer != nil {
str += " "
}
}
if dep.MaxVer != nil {
str += dep.Name + lessThan + dep.MaxVer.String()
}
return str
}
// PKGBUILD is a struct describing a parsed PKGBUILD file.
// Required fields are:
// pkgname
// pkgver
// pkgrel
// arch
// (license) - not required but recommended
//
// parsing a PKGBUILD file without these fields will fail
type PKGBUILD struct {
Pkgnames []string
Pkgver Version // required
Pkgrel Version // required
Pkgdir string
Epoch int
Pkgbase string
Pkgdesc string
Arch []string // required
URL string
License []string // recommended
Groups []string
Depends []*Dependency
Optdepends []string
Makedepends []*Dependency
Checkdepends []*Dependency
Provides []string
Conflicts []string
Replaces []string
Backup []string
Options []string
Install string
Changelog string
Source []string
Noextract []string
Md5sums []string
Sha1sums []string
Sha224sums []string
Sha256sums []string
Sha384sums []string
Sha512sums []string
Validpgpkeys []string
}
// Newer is true if p has a higher version number than p2
func (p *PKGBUILD) Newer(p2 *PKGBUILD) bool {
if p.Epoch < p2.Epoch {
return false
}
if p.Pkgver.bigger(p2.Pkgver) {
return true
}
if p2.Pkgver.bigger(p.Pkgver) {
return false
}
return p.Pkgrel > p2.Pkgrel
}
// Older is true if p has a smaller version number than p2
func (p *PKGBUILD) Older(p2 *PKGBUILD) bool {
if p.Epoch < p2.Epoch {
return true
}
if p2.Pkgver.bigger(p.Pkgver) {
return true
}
if p.Pkgver.bigger(p2.Pkgver) {
return false
}
return p.Pkgrel < p2.Pkgrel
}
// Version returns the full version of the PKGBUILD (including epoch and rel)
func (p *PKGBUILD) Version() string {
if p.Epoch > 0 {
return fmt.Sprintf("%d:%s-%s", p.Epoch, p.Pkgver, p.Pkgrel)
}
return fmt.Sprintf("%s-%s", p.Pkgver, p.Pkgrel)
}
// CompleteVersion returns a Complete version struct including version, rel and
// epoch.
func (p *PKGBUILD) CompleteVersion() CompleteVersion {
return CompleteVersion{
Version: p.Pkgver,
Epoch: uint8(p.Epoch),
Pkgrel: p.Pkgrel,
}
}
// BuildDepends is Depends, MakeDepends and CheckDepends combined.
func (p *PKGBUILD) BuildDepends() []*Dependency {
// TODO real merge
deps := make([]*Dependency, len(p.Depends)+len(p.Makedepends)+len(p.Checkdepends))
deps = append(p.Depends, p.Makedepends...)
deps = append(deps, p.Checkdepends...)
return deps
}
// IsDevel returns true if package contains devel packages (-{bzr,git,svn,hg})
// TODO: more robust check.
func (p *PKGBUILD) IsDevel() bool {
for _, name := range p.Pkgnames {
if strings.HasSuffix(name, "-git") {
return true
}
if strings.HasSuffix(name, "-svn") {
return true
}
if strings.HasSuffix(name, "-hg") {
return true
}
if strings.HasSuffix(name, "-bzr") {
return true
}
}
return false
}
// MustParseSRCINFO must parse the .SRCINFO given by path or it will panic
func MustParseSRCINFO(path string) *PKGBUILD {
pkgbuild, err := ParseSRCINFO(path)
if err != nil {
panic(err)
}
return pkgbuild
}
// ParseSRCINFO parses .SRCINFO file given by path.
// This is a safe alternative to ParsePKGBUILD given that a .SRCINFO file is
// available
func ParseSRCINFO(path string) (*PKGBUILD, error) {
f, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("unable to read file: %s, %s", path, err.Error())
}
return parsePKGBUILD(string(f))
}
// ParseSRCINFOContent parses a .SRCINFO formatted byte slice.
// This is a safe alternative to ParsePKGBUILD given that the .SRCINFO content
// is available
func ParseSRCINFOContent(content []byte) (*PKGBUILD, error) {
return parsePKGBUILD(string(content))
}
// parse a PKGBUILD and check that the required fields has a non-empty value
func parsePKGBUILD(input string) (*PKGBUILD, error) {
pkgb, err := parse(input)
if err != nil {
return nil, err
}
if !validPkgver(string(pkgb.Pkgver)) {
return nil, fmt.Errorf("invalid pkgver: %s", pkgb.Pkgver)
}
if len(pkgb.Arch) == 0 {
return nil, fmt.Errorf("Arch missing")
}
if len(pkgb.Pkgnames) == 0 {
return nil, fmt.Errorf("missing pkgname")
}
for _, name := range pkgb.Pkgnames {
if !validPkgname(name) {
return nil, fmt.Errorf("invalid pkgname: %s", name)
}
}
return pkgb, nil
}
// parses a SRCINFO formatted PKGBUILD
func parse(input string) (*PKGBUILD, error) {
var pkgbuild *PKGBUILD
var next item
lexer := lex(input)
Loop:
for {
token := lexer.nextItem()
// strip arch from source_arch like constructs
witharch := strings.SplitN(token.val, "_", 2)
if len(witharch) == 2 {
found := false
for _, arch := range pkgbuild.Arch {
if arch == witharch[1] {
token.val = witharch[0]
found = true
break
}
}
if !found {
return nil, fmt.Errorf("unsupported arch for variable: %s", token.val)
}
}
switch token.typ {
case itemPkgbase:
next = lexer.nextItem()
pkgbuild = &PKGBUILD{Epoch: 0, Pkgbase: next.val}
case itemPkgname:
next = lexer.nextItem()
pkgbuild.Pkgnames = append(pkgbuild.Pkgnames, next.val)
case itemPkgver:
next = lexer.nextItem()
version, err := parseVersion(next.val)
if err != nil {
return nil, err
}
pkgbuild.Pkgver = version
case itemPkgrel:
next = lexer.nextItem()
rel, err := parseVersion(next.val)
if err != nil {
return nil, err
}
pkgbuild.Pkgrel = rel
case itemPkgdir:
next = lexer.nextItem()
pkgbuild.Pkgdir = next.val
case itemEpoch:
next = lexer.nextItem()
epoch, err := strconv.ParseInt(next.val, 10, 0)
if err != nil {
return nil, err
}
if epoch < 0 {
return nil, fmt.Errorf("invalid epoch: %d", epoch)
}
pkgbuild.Epoch = int(epoch)
case itemPkgdesc:
next = lexer.nextItem()
pkgbuild.Pkgdesc = next.val
case itemArch:
next = lexer.nextItem()
pkgbuild.Arch = append(pkgbuild.Arch, next.val)
case itemURL:
next = lexer.nextItem()
pkgbuild.URL = next.val
case itemLicense:
next = lexer.nextItem()
pkgbuild.License = append(pkgbuild.License, next.val)
case itemGroups:
next = lexer.nextItem()
pkgbuild.Groups = append(pkgbuild.Groups, next.val)
case itemDepends:
next = lexer.nextItem()
deps, err := parseDependency(next.val, pkgbuild.Depends)
if err != nil {
return nil, err
}
pkgbuild.Depends = deps
case itemOptdepends:
next = lexer.nextItem()
pkgbuild.Optdepends = append(pkgbuild.Optdepends, next.val)
case itemMakedepends:
next = lexer.nextItem()
deps, err := parseDependency(next.val, pkgbuild.Makedepends)
if err != nil {
return nil, err
}
pkgbuild.Makedepends = deps
case itemCheckdepends:
next = lexer.nextItem()
deps, err := parseDependency(next.val, pkgbuild.Checkdepends)
if err != nil {
return nil, err
}
pkgbuild.Checkdepends = deps
case itemProvides:
next = lexer.nextItem()
pkgbuild.Provides = append(pkgbuild.Provides, next.val)
case itemConflicts:
next = lexer.nextItem()
pkgbuild.Conflicts = append(pkgbuild.Conflicts, next.val)
case itemReplaces:
next = lexer.nextItem()
pkgbuild.Replaces = append(pkgbuild.Replaces, next.val)
case itemBackup:
next = lexer.nextItem()
pkgbuild.Backup = append(pkgbuild.Backup, next.val)
case itemOptions:
next = lexer.nextItem()
pkgbuild.Options = append(pkgbuild.Options, next.val)
case itemInstall:
next = lexer.nextItem()
pkgbuild.Install = next.val
case itemChangelog:
next = lexer.nextItem()
pkgbuild.Changelog = next.val
case itemSource:
next = lexer.nextItem()
pkgbuild.Source = append(pkgbuild.Source, next.val)
case itemNoextract:
next = lexer.nextItem()
pkgbuild.Noextract = append(pkgbuild.Noextract, next.val)
case itemMd5sums:
next = lexer.nextItem()
pkgbuild.Md5sums = append(pkgbuild.Md5sums, next.val)
case itemSha1sums:
next = lexer.nextItem()
pkgbuild.Sha1sums = append(pkgbuild.Sha1sums, next.val)
case itemSha224sums:
next = lexer.nextItem()
pkgbuild.Sha224sums = append(pkgbuild.Sha224sums, next.val)
case itemSha256sums:
next = lexer.nextItem()
pkgbuild.Sha256sums = append(pkgbuild.Sha256sums, next.val)
case itemSha384sums:
next = lexer.nextItem()
pkgbuild.Sha384sums = append(pkgbuild.Sha384sums, next.val)
case itemSha512sums:
next = lexer.nextItem()
pkgbuild.Sha512sums = append(pkgbuild.Sha512sums, next.val)
case itemValidpgpkeys:
next = lexer.nextItem()
pkgbuild.Validpgpkeys = append(pkgbuild.Validpgpkeys, next.val)
case itemEndSplit:
case itemError:
return nil, fmt.Errorf(token.val)
case itemEOF:
break Loop
default:
return nil, fmt.Errorf("invalid variable: %s", token.val)
}
}
return pkgbuild, nil
}
// parse and validate a version string
func parseVersion(s string) (Version, error) {
if validPkgver(s) {
return Version(s), nil
}
return "", fmt.Errorf("invalid version string: %s", s)
}
// check if name is a valid pkgname format
func validPkgname(name string) bool {
if len(name) < 1 {
return false
}
if name[0] == '-' {
return false
}
for _, r := range name {
if !isValidPkgnameChar(r) {
return false
}
}
return true
}
// check if version is a valid pkgver format
func validPkgver(version string) bool {
if len(version) < 1 {
return false
}
if !isAlphaNumeric(rune(version[0])) {
return false
}
for _, r := range version[1:] {
if !isValidPkgverChar(r) {
return false
}
}
return true
}
// ParseDeps parses a string slice of dependencies into a slice of Dependency
// objects.
func ParseDeps(deps []string) ([]*Dependency, error) {
var err error
dependencies := make([]*Dependency, 0)
for _, dep := range deps {
dependencies, err = parseDependency(dep, dependencies)
if err != nil {
return nil, err
}
}
return dependencies, nil
}
// parse dependency with possible version restriction
func parseDependency(dep string, deps []*Dependency) ([]*Dependency, error) {
var name string
var dependency *Dependency
index := -1
if dep == "" {
return deps, nil
}
if dep[0] == '-' {
return nil, fmt.Errorf("invalid dependency name")
}
i := 0
for _, c := range dep {
if !isValidPkgnameChar(c) {
break
}
i++
}
// check if the dependency has been set before
name = dep[0:i]
for n, d := range deps {
if d.Name == name {
index = n
break
}
}
dependency = &Dependency{
Name: name,
sgt: false,
slt: false,
}
if len(dep) != len(name) {
var eq bytes.Buffer
for _, c := range dep[i:] {
if c == '<' || c == '>' || c == '=' {
i++
eq.WriteRune(c)
continue
}
break
}
version, err := NewCompleteVersion(dep[i:])
if err != nil {
return nil, err
}
switch eq.String() {
case "=":
dependency.MinVer = version
dependency.MaxVer = version
case "<=":
dependency.MaxVer = version
case ">=":
dependency.MinVer = version
case "<":
dependency.MaxVer = version
dependency.slt = true
case ">":
dependency.MinVer = version
dependency.sgt = true
}
}
if index == -1 {
deps = append(deps, dependency)
} else {
deps[index] = deps[index].Restrict(dependency)
}
return deps, nil
}
// isLowerAlpha reports whether c is a lowercase alpha character
func isLowerAlpha(c rune) bool {
return 'a' <= c && c <= 'z'
}
// check if c is a valid pkgname char
func isValidPkgnameChar(c rune) bool {
return isAlphaNumeric(c) || c == '@' || c == '.' || c == '_' || c == '+' || c == '-'
}
// check if c is a valid pkgver char
func isValidPkgverChar(c rune) bool {
return isAlphaNumeric(c) || c == '_' || c == '+' || c == '.' || c == '~'
}

View file

@ -1,324 +0,0 @@
package pkgbuild
import (
"fmt"
"strconv"
"strings"
"unicode"
)
// Version string
type Version string
type CompleteVersion struct {
Version Version
Epoch uint8
Pkgrel Version
}
func (c *CompleteVersion) String() string {
str := ""
if c.Epoch > 0 {
str = fmt.Sprintf("%d:", c.Epoch)
}
str = fmt.Sprintf("%s%s", str, c.Version)
if c.Pkgrel != "" {
str = fmt.Sprintf("%s-%s", str, c.Pkgrel)
}
return str
}
// NewCompleteVersion creates a CompleteVersion including basic version, epoch
// and rel from string
func NewCompleteVersion(s string) (*CompleteVersion, error) {
var err error
epoch := 0
rel := Version("")
// handle possible epoch
versions := strings.Split(s, ":")
if len(versions) > 2 {
return nil, fmt.Errorf("invalid version format: %s", s)
}
if len(versions) > 1 {
epoch, err = strconv.Atoi(versions[0])
if err != nil {
return nil, err
}
}
// handle possible rel
versions = strings.Split(versions[len(versions)-1], "-")
if len(versions) > 2 {
return nil, fmt.Errorf("invalid version format: %s", s)
}
if len(versions) > 1 {
rel = Version(versions[1])
}
// finally check that the actual version is valid
if validPkgver(versions[0]) {
return &CompleteVersion{
Version: Version(versions[0]),
Epoch: uint8(epoch),
Pkgrel: rel,
}, nil
}
return nil, fmt.Errorf("invalid version format: %s", s)
}
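// Illustrative sketch (not part of the original file): the three fields
// NewCompleteVersion extracts from a full pacman-style version string.
// The literal version is an example only.
func exampleNewCompleteVersion() {
	cv, err := NewCompleteVersion("2:1.4.11-1")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cv.Epoch)    // 2
	fmt.Println(cv.Version)  // 1.4.11
	fmt.Println(cv.Pkgrel)   // 1
	fmt.Println(cv.String()) // 2:1.4.11-1
}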
// Older returns true if a is older than the argument version
func (a *CompleteVersion) Older(b *CompleteVersion) bool {
return a.cmp(b) == -1
}
// Newer returns true if a is newer than the argument version
func (a *CompleteVersion) Newer(b *CompleteVersion) bool {
return a.cmp(b) == 1
}
// Equal returns true if a is equal to the argument version
func (a *CompleteVersion) Equal(b *CompleteVersion) bool {
return a.cmp(b) == 0
}
// Satisfies tests whether or not version fits inside the bounds specified by
// dep
func (version *CompleteVersion) Satisfies(dep *Dependency) bool {
var cmpMax int8
var cmpMin int8
if dep.MaxVer != nil {
cmpMax = version.cmp(dep.MaxVer)
if cmpMax == 1 {
return false
}
if cmpMax == 0 && dep.slt {
return false
}
}
if dep.MinVer != nil {
if dep.MaxVer == dep.MinVer {
cmpMin = cmpMax
} else {
cmpMin = version.cmp(dep.MinVer)
}
if cmpMin == -1 {
return false
}
if cmpMin == 0 && dep.sgt {
return false
}
}
return true
}
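// Illustrative sketch (not part of the original file): comparing two versions
// and checking one against a lower-bound-only Dependency (the equivalent of a
// ">=1.2.0-1" restriction). The versions and the dependency name are examples.
func exampleCompareAndSatisfy() {
	installed, _ := NewCompleteVersion("1.2.0-1")
	candidate, _ := NewCompleteVersion("1.2.1-1")

	fmt.Println(installed.Older(candidate)) // true
	fmt.Println(candidate.Newer(installed)) // true

	dep := &Dependency{Name: "example", MinVer: installed}
	fmt.Println(candidate.Satisfies(dep)) // true
}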
// Compare a to b:
// return 1: a is newer than b
// 0: a and b are the same version
// -1: b is newer than a
func (a *CompleteVersion) cmp(b *CompleteVersion) int8 {
if a.Epoch > b.Epoch {
return 1
}
if a.Epoch < b.Epoch {
return -1
}
if a.Version.bigger(b.Version) {
return 1
}
if b.Version.bigger(a.Version) {
return -1
}
if a.Pkgrel == "" || b.Pkgrel == "" {
return 0
}
if a.Pkgrel.bigger(b.Pkgrel) {
return 1
}
if b.Pkgrel.bigger(a.Pkgrel) {
return -1
}
return 0
}
// Compare alpha and numeric segments of two versions.
// return 1: a is newer than b
// 0: a and b are the same version
// -1: b is newer than a
//
// This is based on the rpmvercmp function used in libalpm
// https://projects.archlinux.org/pacman.git/tree/lib/libalpm/version.c
func rpmvercmp(av, bv Version) int {
if av == bv {
return 0
}
a, b := []rune(string(av)), []rune(string(bv))
var one, two, ptr1, ptr2 int
var isNum bool
one, two, ptr1, ptr2 = 0, 0, 0, 0
// loop through each version segment of a and b and compare them
for len(a) > one && len(b) > two {
for len(a) > one && !isAlphaNumeric(a[one]) {
one++
}
for len(b) > two && !isAlphaNumeric(b[two]) {
two++
}
// if we ran to the end of either, we are finished with the loop
if !(len(a) > one && len(b) > two) {
break
}
// if the separator lengths were different, we are also finished
if one-ptr1 != two-ptr2 {
if one-ptr1 < two-ptr2 {
return -1
}
return 1
}
ptr1 = one
ptr2 = two
// grab first completely alpha or completely numeric segment
// leave one and two pointing to the start of the alpha or numeric
// segment and walk ptr1 and ptr2 to end of segment
if isDigit(a[ptr1]) {
for len(a) > ptr1 && isDigit(a[ptr1]) {
ptr1++
}
for len(b) > ptr2 && isDigit(b[ptr2]) {
ptr2++
}
isNum = true
} else {
for len(a) > ptr1 && isAlpha(a[ptr1]) {
ptr1++
}
for len(b) > ptr2 && isAlpha(b[ptr2]) {
ptr2++
}
isNum = false
}
// take care of the case where the two version segments are
// different types: one numeric, the other alpha (i.e. empty)
// numeric segments are always newer than alpha segments
if two == ptr2 {
if isNum {
return 1
}
return -1
}
if isNum {
// we know this part of the strings only contains digits
// so we can ignore the error value since it should
// always be nil
as, _ := strconv.ParseInt(string(a[one:ptr1]), 10, 0)
bs, _ := strconv.ParseInt(string(b[two:ptr2]), 10, 0)
// the segment that parses to the larger number wins
if as > bs {
return 1
}
if as < bs {
return -1
}
} else {
cmp := alphaCompare(a[one:ptr1], b[two:ptr2])
if cmp < 0 {
return -1
}
if cmp > 0 {
return 1
}
}
// advance one and two to next segment
one = ptr1
two = ptr2
}
// this catches the case where all numeric and alpha segments have
// compared identically but the segment separating characters were
// different
if len(a) <= one && len(b) <= two {
return 0
}
// the final showdown. we never want a remaining alpha string to
// beat an empty string. the logic is a bit weird, but:
// - if one is empty and two is not an alpha, two is newer.
// - if one is an alpha, two is newer.
// - otherwise one is newer.
if (len(a) <= one && !isAlpha(b[two])) || len(a) > one && isAlpha(a[one]) {
return -1
}
return 1
}
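// Illustrative sketch (not part of the original file): a few inputs showing
// the ordering rpmvercmp implements, following the comments above. The string
// literals convert implicitly to the Version type.
func exampleRpmvercmp() {
	fmt.Println(rpmvercmp("1.0.1", "1.0"))  // 1: extra numeric segment is newer
	fmt.Println(rpmvercmp("1.0", "1.0a"))   // 1: a leftover alpha segment loses
	fmt.Println(rpmvercmp("1.0rc1", "1.0")) // -1: same rule, from the other side
	fmt.Println(rpmvercmp("2.0", "2.0"))    // 0: identical strings short-circuit
}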
// alphaCompare compares two alpha version segments and will return a positive
// value if a is bigger than b and a negative if b is bigger than a else 0
func alphaCompare(a, b []rune) int8 {
if string(a) == string(b) {
return 0
}
i := 0
for len(a) > i && len(b) > i && a[i] == b[i] {
i++
}
if len(a) == i && len(b) > i {
return -1
}
if len(b) == i {
return 1
}
return int8(a[i]) - int8(b[i])
}
// check if version number v is bigger than v2
func (v Version) bigger(v2 Version) bool {
return rpmvercmp(v, v2) == 1
}
// isAlphaNumeric reports whether c is an alpha character or digit
func isAlphaNumeric(c rune) bool {
return isDigit(c) || isAlpha(c)
}
// isAlpha reports whether c is an alpha character
func isAlpha(c rune) bool {
return unicode.IsLetter(c)
}
// isDigit reports whether d is an ASCII digit
func isDigit(d rune) bool {
return unicode.IsDigit(d)
}