Merge branch 'fernandocollova-master'

Rupus Reinefjord 2019-01-01 14:59:16 +01:00
commit c54ff5d948
2 changed files with 143 additions and 80 deletions

pass2csv.py (134 additions, 80 deletions)

@@ -1,36 +1,44 @@
#!/usr/bin/env python3
import csv
import logging
import os
import re
from argparse import ArgumentParser

import gnupg


class CSVExporter():
    def __init__(self, kpx_format):
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)

        # Set to True to allow for alternate password csv to be created
        # See README for differences
        self.kpx_format = kpx_format

        if self.kpx_format:
            # A list of possible fields (in order) that could be converted to login fields
            self.login_fields = ['login', 'user', 'username', 'email']
            # Set to True to extract url fields
            self.get_url = True
            # A regular expression list of lines that should be excluded from the notes field
            self.exclude_rows = ['^---$', '^autotype ?: ?']

        self.logger.info("Using KPX format: %s", self.kpx_format)

    def traverse(self, path):
        for root, dirs, files in os.walk(path):
            if '.git' in dirs:
                dirs.remove('.git')
            for name in files:
                yield os.path.join(root, name)

    def getMetadata(self, notes_raw):
        lines = notes_raw.split('\n')

        # A list of lines to keep as notes (will be joined by newline)
@@ -48,25 +56,27 @@ def getMetadata(notes_raw):
                all_fields.add(field_search.group(1))

        # Check if any of the fields match the login names
        login_fields = [
            field for field in self.login_fields if field in all_fields]
        # Get the field to use for the login. Since self.login_fields is in order, the 0th element will contain the first match
        login_field = None if not login_fields else login_fields[0]

        # Iterate through the file again to build the return array
        for line in lines:
            # If any of the exclusion patterns match, ignore the line
            if [pattern for pattern in self.exclude_rows if re.search(pattern, line, re.I)]:
                continue

            if login_field:
                user_search = re.search(
                    '^' + login_field + ' ?: ?(.*)$', line, re.I)
                if user_search:
                    user = user_search.group(1)
                    # The user was matched, don't add it to notes
                    continue

            if self.get_url:
                url_search = re.search('^url ?: ?(.*)$', line, re.I)
                if url_search:
                    url = url_search.group(1)
@@ -77,38 +87,82 @@ def getMetadata(notes_raw):
        return (user, url, '\n'.join(notes).strip())

    def parse(self, basepath, path, data):
        name = os.path.splitext(os.path.basename(path))[0]
        group = os.path.dirname(os.path.relpath(path, basepath))
        split_data = data.split('\n', maxsplit=1)
        password = split_data[0]
        # Perform if/else in case there are no notes for a field
        notes = split_data[1] if len(split_data) > 1 else ""
        self.logger.info("Processing %s", name)
        if self.kpx_format:
            # We are using the advanced format; try extracting user and url
            user, url, notes = self.getMetadata(notes)
            return [group, name, user, password, url, notes]
        else:
            # We are not using KPX format; just use notes
            return [group, name, password, notes]


def main(kpx_format, gpgbinary, use_agent, pass_path):
    """Main script entrypoint."""
    exporter = CSVExporter(kpx_format)
    gpg = gnupg.GPG(use_agent=use_agent, gpgbinary=gpgbinary)
    gpg.encoding = 'utf-8'
    csv_data = []
    for file_path in exporter.traverse(pass_path):
        if os.path.splitext(file_path)[1] == '.gpg':
            with open(file_path, 'rb') as f:
                data = str(gpg.decrypt_file(f))
                if data == "":
                    raise ValueError("The password file is empty")
                csv_data.append(exporter.parse(pass_path, file_path, data))

    with open('pass.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerows(csv_data)


class OptionsParser(ArgumentParser):
    """Regular ArgumentParser with the script's options."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.add_argument(
            'pass_path',
            metavar='path',
            type=str,
            help="Path to the PasswordStore folder to use",
        )
        self.add_argument(
            '-a', '--agent',
            action='store_true',
            help="Use this option to ask gpg to use its auth agent",
            dest='use_agent',
        )
        self.add_argument(
            '-b', '--gpgbinary',
            type=str,
            help="Path to the gpg binary you wish to use",
            dest='gpgbinary',
            default="gpg"
        )
        self.add_argument(
            '-x', '--kpx',
            action='store_true',
            help="Use this option to format the CSV for KeePassXC",
            dest='kpx_format',
        )


if __name__ == '__main__':
    PARSER = OptionsParser()
    ARGS = PARSER.parse_args()
    main(**vars(ARGS))
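As a quick illustration of the refactored flow (not part of the diff itself), the sketch below feeds one hypothetical decrypted entry through the exporter. It assumes pass2csv.py is importable and python-gnupg is installed; the entry text, the store paths, and the row shown in the comments are made-up examples.

# Illustrative only: parse a hypothetical decrypted entry with the class above.
from pass2csv import CSVExporter

exporter = CSVExporter(kpx_format=True)
entry = "hunter2\nuser: alice\nurl: https://example.com\nsome note\n"
row = exporter.parse('/tmp/store', '/tmp/store/web/example.gpg', entry)
# With the KPX format enabled, parse() returns
# [group, name, user, password, url, notes], here roughly:
# ['web', 'example', 'alice', 'hunter2', 'https://example.com', 'some note']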

setup.py (new file, 9 additions)

@@ -0,0 +1,9 @@
from setuptools import setup

setup(
    name='pass2csv',
    version="0.1.0",
    description='pass2csv',
    scripts=['pass2csv.py'],
    install_requires=["python-gnupg"]
)
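To show how the new command-line options line up with main(), here is a similarly hypothetical invocation sketch; the store path is a placeholder and the module is again assumed to be importable.

# Illustrative only: parse the new flags programmatically and run the export.
from pass2csv import OptionsParser, main

args = OptionsParser().parse_args(
    ['/home/user/.password-store', '--kpx', '--agent'])
main(**vars(args))  # same as running: pass2csv.py /home/user/.password-store -x -a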