Remove duplicated output at the end

Alberto Planas 2021-03-26 14:15:46 +01:00
parent 9269105f5b
commit 981aeed93d
2 changed files with 30 additions and 20 deletions

View file

@@ -4,7 +4,7 @@ import os
 import sys
 
 from . import Metadata
-from .metadata import normalize_deps
+from .metadata import normalize_deps, Dependency
 
 
 def _get_binaries(cargo_toml):
@@ -81,49 +81,59 @@ def main():
         features.add(f or None)
 
     def process_metadata(md):
-        data = set()
+        data = []
         if args.name:
-            data.add(md.name)
+            data.append(md.name)
         if args.version:
-            data.add(md._version)
+            data.append(md._version)
         if args.rpm_version:
-            data.add(md.version)
+            data.append(md.version)
         if args.target_kinds:
-            data.update(tgt.kind for tgt in md.targets)
+            data.extend(tgt.kind for tgt in md.targets)
         if args.list_features:
-            data.update(f for f in md.dependencies if f is not None)
+            data.extend(f for f in md.dependencies if f is not None)
         if args.provides:
-            data.update(md.provides(f) for f in features)
+            data.extend(md.provides(f) for f in features)
         if args.requires:
             # Someone should own /usr/share/cargo/registry
-            data.add('cargo')
+            data.append('cargo')
             if args.all_features:
-                data.update(md.all_dependencies)
+                data.extend(md.all_dependencies)
             else:
                 for f in features:
-                    data.update(md.requires(f))
+                    data.extend(md.requires(f))
         if args.build_requires:
-            data.add("rust-packaging")
+            data.append("rust-packaging")
             if args.all_features:
-                data.update(md.all_dependencies)
+                data.extend(md.all_dependencies)
             else:
                 for f in features:
-                    data.update(md.requires(f, resolve=True))
+                    data.extend(md.requires(f, resolve=True))
         if args.test_requires:
-            data.update(md.dev_dependencies)
+            data.extend(md.dev_dependencies)
         if args.provides_vendor:
             # Print the vendoring providers only if the 'vendor'
             # directory is present
             if args.vendor or os.path.isdir('vendor'):
-                data.update(md.resolved_dependencies())
+                data.extend(md.resolved_dependencies())
         return data
 
     for f in files:
         data = set()
         mds = Metadata.from_file(f, include_members=args.include_workspaces)
         for md in mds:
-            data.update(process_metadata(md))
-        for line in data:
+            # process_metadata can return an [string], but can be also
+            # a [Dependency] instances, that once presented have
+            # multiple substrings. If we want to order the data and
+            # remove all the duplicates we should first normalize it
+            metadata_lines = []
+            for metadata in process_metadata(md):
+                if isinstance(metadata, Dependency):
+                    metadata_lines.extend(metadata.normalize())
+                else:
+                    metadata_lines.append(metadata)
+            data.update(metadata_lines)
+        for line in sorted(data):
             print(line)
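
The pattern in this hunk: process_metadata now returns a list that can mix plain strings with Dependency instances, and a single Dependency may expand into several output lines once rendered. Normalizing everything to strings first, and only then deduplicating through a set and sorting, is what removes the duplicated output the commit title refers to. A minimal self-contained sketch of that pattern, where the Dependency class and the crate(...) line format are illustrative stand-ins, not the real classes imported from .metadata:

# Minimal sketch of the normalize-then-deduplicate pattern above.
# This Dependency class and the crate(...) line format are stand-ins,
# not the real implementation imported from .metadata.
class Dependency:
    def __init__(self, name, features=()):
        self.name = name
        self.features = features

    def normalize(self):
        # A single dependency can expand into several output lines,
        # one per requested feature.
        return [f"crate({self.name}/{f})" if f else f"crate({self.name})"
                for f in (self.features or (None,))]

def render(items):
    lines = set()
    for item in items:
        if isinstance(item, Dependency):
            lines.update(item.normalize())
        else:
            lines.add(item)
    # Deduplicate after normalization, then sort for stable output.
    return sorted(lines)

print("\n".join(render(["cargo", Dependency("serde", ("derive",)),
                        Dependency("serde"), "cargo"])))
# -> cargo
#    crate(serde)
#    crate(serde/derive)

Deduplicating before normalization would miss cases where two distinct Dependency objects render to the same lines, which is why the normalization step has to come first.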

View file

@@ -371,9 +371,9 @@ class Metadata:
         return self
 
     @classmethod
-    def from_file(cls, path):
+    def from_file(cls, path, include_members=False):
         instances = []
-        members = Metadata.members(path)
+        members = Metadata.members(path) if include_members else []
         for member in (members or [path]):
             instance = cls.from_json(Metadata.manifest(member))
             instance._path = member
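
The hunk ends at instance._path = member, so the tail of the method is not shown. A runnable sketch of the gated workspace expansion, with a stub standing in for the real Metadata.members() and an assumed append/return tail:

# Runnable sketch: Metadata.members() is a stub, and the trailing
# append/return lines are assumed (the hunk cuts off before them).
class Metadata:
    @staticmethod
    def members(path):
        # Stand-in for the real workspace-member discovery.
        return [path + "/member-a", path + "/member-b"]

    @classmethod
    def from_file(cls, path, include_members=False):
        instances = []
        # Members are expanded only on request; the default preserves
        # the old single-manifest behavior.
        members = Metadata.members(path) if include_members else []
        for member in (members or [path]):  # fall back to the manifest itself
            instance = cls()
            instance._path = member
            instances.append(instance)
        return instances

print([m._path for m in Metadata.from_file("ws")])
# -> ['ws']
print([m._path for m in Metadata.from_file("ws", include_members=True)])
# -> ['ws/member-a', 'ws/member-b']

Defaulting include_members to False keeps existing single-crate callers unchanged, while members or [path] falls back to the manifest itself when no workspace members are expanded.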