Instead of leaving tests early if they cannot run, use the
pytest.mark.skipif marker to tell the test system not to
even run the test.
This also improves the stats, because skipped tests are
counted separately, making it obvious that some tests
did not run, for whatever reason.
import tempfile
import shutil
import os
-
+import pytest
from mediagoblin.media_types.pdf.processing import (
pdf_info, check_prerequisites, create_pdf_thumb)
GOOD='mediagoblin/tests/test_submission/good.pdf'
+@pytest.mark.skipif("not check_prerequisites()")
def test_pdf():
- if not check_prerequisites():
- return
good_dict = {'pdf_version_major': 1, 'pdf_title': '',
'pdf_page_size_width': 612, 'pdf_author': '',
'pdf_keywords': '', 'pdf_pages': 10,
import urlparse
import os
+import pytest
from pkg_resources import resource_filename
self._setup(test_app)
self.check_normal_upload(u'Normal upload 2', GOOD_PNG)
+ @pytest.mark.skipif("not pdf_check_prerequisites()")
def test_normal_pdf(self, test_app):
- if not pdf_check_prerequisites():
- return
self._setup(test_app)
response, context = self.do_post({'title': u'Normal upload 3 (pdf)'},
do_follow=True,