diff --git a/docs/antora.yml b/docs/antora.yml index fd02a33e7..7156dbd64 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -9,13 +9,13 @@ nav: - modules/apps/nav.adoc - modules/manual-testing/nav.adoc - modules/automation-testing/nav.adoc - - modules/scriptless-automation/nav.adoc + - modules/_scriptless-automation/nav.adoc - modules/debugging/nav.adoc - modules/kobiton-cli/nav.adoc - modules/session-explorer/nav.adoc - modules/profile/nav.adoc - modules/organization/nav.adoc - - modules/test-management/nav.adoc + - modules/_test-management/nav.adoc - modules/reporting/nav.adoc - modules/device-lab-management/nav.adoc - modules/supported-platforms/nav.adoc diff --git a/docs/modules/_scriptless-automation/nav.adoc b/docs/modules/_scriptless-automation/nav.adoc new file mode 100644 index 000000000..92c0b7363 --- /dev/null +++ b/docs/modules/_scriptless-automation/nav.adoc @@ -0,0 +1,13 @@ +.xref:index.adoc[] + +* xref:_scriptless-automation:create-a-baseline-session.adoc[] + +* Get a session ID +** xref:_get-a-session-id/using-the-kobiton-portal.adoc[] +** xref:_get-a-session-id/using-the-kobiton-api.adoc[] + +* xref:run-scriptless-with-the-kobiton-portal.adoc[] +* xref:run-scriptless-with-the-kobiton-api.adoc[] +* xref:automation-testing:scripting/auto-generate-an-appium-script.adoc[] +* xref:scriptless-best-practices.adoc[] +* xref:scriptless-requirements.adoc[] diff --git a/docs/modules/scriptless-automation/pages/create-a-baseline-session.adoc b/docs/modules/_scriptless-automation/_pages/_create-a-baseline-session.adoc similarity index 87% rename from docs/modules/scriptless-automation/pages/create-a-baseline-session.adoc rename to docs/modules/_scriptless-automation/_pages/_create-a-baseline-session.adoc index 5ed99c100..114311843 100644 --- a/docs/modules/scriptless-automation/pages/create-a-baseline-session.adoc +++ b/docs/modules/_scriptless-automation/_pages/_create-a-baseline-session.adoc @@ -15,7 +15,7 @@ 
image:manual-testing:install-apps-context.png[width=1000,alt="Install Apps"] [IMPORTANT] ==== -Make sure your app aligns with the Scriptless xref:scriptless-automation:scriptless-requirements.adoc#_application_requirements[application requirements] before proceeding. +Make sure your app aligns with the Scriptless xref:_scriptless-automation:scriptless-requirements.adoc#_application_requirements[application requirements] before proceeding. ==== You can upload a xref:apps:supported-filetypes.adoc[supported filetype] from your computer, install an app from a URL, or choose an app from xref:apps:manage-apps.adoc[your app repository]. @@ -35,7 +35,7 @@ include::scriptless-requirements.adoc[tag=supported-actions] == Follow scriptless best practices -For best results, follow the xref:scriptless-automation:scriptless-best-practices.adoc[] while running your tests. +For best results, follow the xref:_scriptless-automation:scriptless-best-practices.adoc[] while running your tests. == Exit your session diff --git a/docs/modules/scriptless-automation/pages/get-a-session-id/using-the-kobiton-api.adoc b/docs/modules/_scriptless-automation/_pages/_get-a-session-id/using-the-kobiton-api.adoc similarity index 100% rename from docs/modules/scriptless-automation/pages/get-a-session-id/using-the-kobiton-api.adoc rename to docs/modules/_scriptless-automation/_pages/_get-a-session-id/using-the-kobiton-api.adoc diff --git a/docs/modules/scriptless-automation/pages/get-a-session-id/using-the-kobiton-portal.adoc b/docs/modules/_scriptless-automation/_pages/_get-a-session-id/using-the-kobiton-portal.adoc similarity index 100% rename from docs/modules/scriptless-automation/pages/get-a-session-id/using-the-kobiton-portal.adoc rename to docs/modules/_scriptless-automation/_pages/_get-a-session-id/using-the-kobiton-portal.adoc diff --git a/docs/modules/_scriptless-automation/_pages/_index.adoc b/docs/modules/_scriptless-automation/_pages/_index.adoc new file mode 100644 index 
000000000..b7a14981e --- /dev/null +++ b/docs/modules/_scriptless-automation/_pages/_index.adoc @@ -0,0 +1,10 @@ += Scriptless Automation +:navtitle: Scriptless Automation + +Automate your tests without writing scripts. + +image:automation-testing:automation-testing-index-context.png[width=1000,alt="A context to Scriptless Automation"] + +== In this section + +include::../nav.adoc[lines=2..] diff --git a/docs/modules/scriptless-automation/pages/run-scriptless-with-the-kobiton-api.adoc b/docs/modules/_scriptless-automation/_pages/_run-scriptless-with-the-kobiton-api.adoc similarity index 100% rename from docs/modules/scriptless-automation/pages/run-scriptless-with-the-kobiton-api.adoc rename to docs/modules/_scriptless-automation/_pages/_run-scriptless-with-the-kobiton-api.adoc diff --git a/docs/modules/scriptless-automation/pages/run-scriptless-with-the-kobiton-portal.adoc b/docs/modules/_scriptless-automation/_pages/_run-scriptless-with-the-kobiton-portal.adoc similarity index 53% rename from docs/modules/scriptless-automation/pages/run-scriptless-with-the-kobiton-portal.adoc rename to docs/modules/_scriptless-automation/_pages/_run-scriptless-with-the-kobiton-portal.adoc index 03295ade3..24b5a301e 100644 --- a/docs/modules/scriptless-automation/pages/run-scriptless-with-the-kobiton-portal.adoc +++ b/docs/modules/_scriptless-automation/_pages/_run-scriptless-with-the-kobiton-portal.adoc @@ -8,7 +8,7 @@ Learn how to run Scriptless Automation using the Kobiton portal. You'll need to complete the following: * xref:create-a-baseline-session.adoc[Create a baseline session]. -* xref:test-management:create-a-test-case.adoc[] using your baseline session. +* xref:_test-management:create-a-test-case.adoc[] using your baseline session. == Open Scriptless Automation @@ -21,14 +21,14 @@ For more information, see xref:session-explorer:session-metadata.adoc[]. Select *Automated Test Case*. 
-image:scriptless-automation:scriptless-automated-test-case-button.png[width=1000,alt="The session overview page with the Automated Test Case button"] +image:_scriptless-automation:scriptless-automated-test-case-button.png[width=1000,alt="The session overview page with the Automated Test Case button"] Select *Rerun*. -image:scriptless-automation:scriptless-rerun-button.png[width=1000,alt="Select Rerun in the Automated Test Case screen"] +image:_scriptless-automation:scriptless-rerun-button.png[width=1000,alt="Select Rerun in the Automated Test Case screen"] [TIP] -For best results, select Rerun when the message _Congratulations! Your test case is good to go now_ appears on screen. If a different message is showing, you may need to xref:test-management:remediation/annotate-a-test-step.adoc[annotate the test steps]. +For best results, select Rerun when the message _Congratulations! Your test case is good to go now_ appears on screen. If a different message is showing, you may need to xref:_test-management:remediation/annotate-a-test-step.adoc[annotate the test steps]. == Configure your scriptless session @@ -36,15 +36,15 @@ For best results, select Rerun when the message _Congratulations! Your test case In the *Rerun Configurations* window, select *Device Bundles*. -image:scriptless-automation:scriptless-rerun-configurations-device-bundles-context.png[width=600,alt="The select device bundles dropdown list"] +image:_scriptless-automation:scriptless-rerun-configurations-device-bundles-context.png[width=600,alt="The select device bundles dropdown list"] Select the dropdown and choose a bundle. -image:scriptless-automation:scriptless-select-device-bundles.png[width=600,alt="The select device bundles dropdown list"] +image:_scriptless-automation:scriptless-select-device-bundles.png[width=600,alt="The select device bundles dropdown list"] If you'd like to remove any devices for this session, de-select the checkbox next to each device. 
-image:scriptless-automation:scriptless-deselect-device-from-bundles.png[width=1000,alt="A device is de-selected from the device bundles"] +image:_scriptless-automation:scriptless-deselect-device-from-bundles.png[width=1000,alt="A device is de-selected from the device bundles"] [NOTE] If you'd like to make more changes to this bundle, select *Configure Bundle*. For more information, see xref:organization:device-bundles/manage-device-bundles.adoc[]. @@ -53,30 +53,30 @@ If you'd like to make more changes to this bundle, select *Configure Bundle*. Fo Data-driven testing allows you to define custom data sets and replace values from your baseline session. In the *Rerun Configurations* window, select *Data Driven Testing*. -image:scriptless-automation:rerun-configurations-data-driven-testing-context.png[width=1000,alt="The Data Driven Testing section under Rerun Configurations"] +image:_scriptless-automation:rerun-configurations-data-driven-testing-context.png[width=1000,alt="The Data Driven Testing section under Rerun Configurations"] Select the *+* button to add new test data. -image:scriptless-automation:data-driven-new-dataset.png[width=1000,alt="Select the plus icon to add new test data"] +image:_scriptless-automation:data-driven-new-dataset.png[width=1000,alt="Select the plus icon to add new test data"] The new test data is only applied to a specific device in the bundle. Choose the device to apply by selecting it from the *Assign Device* dropdown. -image:scriptless-automation:data-driven-assign-device.png[width=500,alt="The Assign Device dropdown"] +image:_scriptless-automation:data-driven-assign-device.png[width=500,alt="The Assign Device dropdown"] To replace a value used for a specific test step, select the test step with a value input action, then enter a new value. 
-image:scriptless-automation:data-driven-replace-value.png[width=500,alt="Replacing a value in a test step"] +image:_scriptless-automation:data-driven-replace-value.png[width=500,alt="Replacing a value in a test step"] Once a value has been modified, it will be marked as *Edited*. -image:scriptless-automation:data-driven-test-step-edited.png[width=500,alt="The value is marked as edited"] +image:_scriptless-automation:data-driven-test-step-edited.png[width=500,alt="The value is marked as edited"] == Run Scriptless Automation When you're finished configuring your scriptless session, select *Rerun*. -image:scriptless-automation:rerun-configurations-data-driven-testing-context.png[width=1000,alt="Select Rerun after finishing the configurations"] +image:_scriptless-automation:rerun-configurations-data-driven-testing-context.png[width=1000,alt="Select Rerun after finishing the configurations"] You'll be redirected to your test run where you can view live updates for your run. -image:scriptless-automation:test-run-view.png[width=1000,alt="The test run view"] +image:_scriptless-automation:test-run-view.png[width=1000,alt="The test run view"] diff --git a/docs/modules/scriptless-automation/pages/scriptless-best-practices.adoc b/docs/modules/_scriptless-automation/_pages/_scriptless-best-practices.adoc similarity index 100% rename from docs/modules/scriptless-automation/pages/scriptless-best-practices.adoc rename to docs/modules/_scriptless-automation/_pages/_scriptless-best-practices.adoc diff --git a/docs/modules/scriptless-automation/pages/scriptless-requirements.adoc b/docs/modules/_scriptless-automation/_pages/_scriptless-requirements.adoc similarity index 83% rename from docs/modules/scriptless-automation/pages/scriptless-requirements.adoc rename to docs/modules/_scriptless-automation/_pages/_scriptless-requirements.adoc index 2c7e2cd59..52225b2b8 100644 --- a/docs/modules/scriptless-automation/pages/scriptless-requirements.adoc +++ 
b/docs/modules/_scriptless-automation/_pages/_scriptless-requirements.adoc @@ -103,15 +103,15 @@ _Only English is supported, excluding the emoji keyboard._ If an unsupported action is performed, the Scriptless Automation stops capturing, and subsequent steps after the unsupported action are not captured and executable in rerun/revisit or in the auto-generated scripts. -image:scriptless-automation:scriptless-unsupported-action.png[width=1000,alt="An unsupported action performed in a manual session"] +image:_scriptless-automation:scriptless-unsupported-action.png[width=1000,alt="An unsupported action performed in a manual session"] In the Session Explorer, the unsupported actions are highlighted in red: -image:scriptless-automation:unsupported-action-session-explorer.png[width=1000,alt="An unsupported action displayed in Session Explorer"] +image:_scriptless-automation:unsupported-action-session-explorer.png[width=1000,alt="An unsupported action displayed in Session Explorer"] In the Automated Test Case, only test steps before the unsupported action are included: -image:scriptless-automation:unsupported-action-automated-test-case.png[width=1000,alt="An unsupported action displayed in Automated Test Case"] +image:_scriptless-automation:unsupported-action-automated-test-case.png[width=1000,alt="An unsupported action displayed in Automated Test Case"] // end::supported-actions[] diff --git a/docs/modules/_scriptless-automation/images/authentication.png b/docs/modules/_scriptless-automation/images/authentication.png new file mode 100644 index 000000000..0efdad8af Binary files /dev/null and b/docs/modules/_scriptless-automation/images/authentication.png differ diff --git a/docs/modules/_scriptless-automation/images/body.png b/docs/modules/_scriptless-automation/images/body.png new file mode 100644 index 000000000..697abff5f Binary files /dev/null and b/docs/modules/_scriptless-automation/images/body.png differ diff --git 
a/docs/modules/_scriptless-automation/images/click-rerun-to-run-automation-context.png b/docs/modules/_scriptless-automation/images/click-rerun-to-run-automation-context.png new file mode 100644 index 000000000..3e80f3ad9 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/click-rerun-to-run-automation-context.png differ diff --git a/docs/modules/_scriptless-automation/images/color-assertion.png b/docs/modules/_scriptless-automation/images/color-assertion.png new file mode 100644 index 000000000..e739ff88e Binary files /dev/null and b/docs/modules/_scriptless-automation/images/color-assertion.png differ diff --git a/docs/modules/_scriptless-automation/images/data-driven-assign-device.png b/docs/modules/_scriptless-automation/images/data-driven-assign-device.png new file mode 100644 index 000000000..016fd6d7b Binary files /dev/null and b/docs/modules/_scriptless-automation/images/data-driven-assign-device.png differ diff --git a/docs/modules/_scriptless-automation/images/data-driven-new-dataset.png b/docs/modules/_scriptless-automation/images/data-driven-new-dataset.png new file mode 100644 index 000000000..2f9b20d54 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/data-driven-new-dataset.png differ diff --git a/docs/modules/_scriptless-automation/images/data-driven-replace-value.png b/docs/modules/_scriptless-automation/images/data-driven-replace-value.png new file mode 100644 index 000000000..cb352e884 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/data-driven-replace-value.png differ diff --git a/docs/modules/_scriptless-automation/images/data-driven-test-step-edited.png b/docs/modules/_scriptless-automation/images/data-driven-test-step-edited.png new file mode 100644 index 000000000..2d438313b Binary files /dev/null and b/docs/modules/_scriptless-automation/images/data-driven-test-step-edited.png differ diff --git a/docs/modules/_scriptless-automation/images/exit-session-context.png 
b/docs/modules/_scriptless-automation/images/exit-session-context.png new file mode 100644 index 000000000..800d04d6d Binary files /dev/null and b/docs/modules/_scriptless-automation/images/exit-session-context.png differ diff --git a/docs/modules/_scriptless-automation/images/ignore-remediation.png b/docs/modules/_scriptless-automation/images/ignore-remediation.png new file mode 100644 index 000000000..de65be27a Binary files /dev/null and b/docs/modules/_scriptless-automation/images/ignore-remediation.png differ diff --git a/docs/modules/_scriptless-automation/images/performance-validation.png b/docs/modules/_scriptless-automation/images/performance-validation.png new file mode 100644 index 000000000..c9d7086ef Binary files /dev/null and b/docs/modules/_scriptless-automation/images/performance-validation.png differ diff --git a/docs/modules/_scriptless-automation/images/remediate-the-validation.png b/docs/modules/_scriptless-automation/images/remediate-the-validation.png new file mode 100644 index 000000000..9aadb8a5b Binary files /dev/null and b/docs/modules/_scriptless-automation/images/remediate-the-validation.png differ diff --git a/docs/modules/_scriptless-automation/images/rerun-configurations-data-driven-testing-context.png b/docs/modules/_scriptless-automation/images/rerun-configurations-data-driven-testing-context.png new file mode 100644 index 000000000..5c532efa0 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/rerun-configurations-data-driven-testing-context.png differ diff --git a/docs/modules/_scriptless-automation/images/scriptless-automated-test-case-button.png b/docs/modules/_scriptless-automation/images/scriptless-automated-test-case-button.png new file mode 100644 index 000000000..3bc7053d7 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/scriptless-automated-test-case-button.png differ diff --git a/docs/modules/_scriptless-automation/images/scriptless-deselect-device-from-bundles.png 
b/docs/modules/_scriptless-automation/images/scriptless-deselect-device-from-bundles.png new file mode 100644 index 000000000..efe65ec88 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/scriptless-deselect-device-from-bundles.png differ diff --git a/docs/modules/_scriptless-automation/images/scriptless-rerun-button.png b/docs/modules/_scriptless-automation/images/scriptless-rerun-button.png new file mode 100644 index 000000000..be8932cd9 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/scriptless-rerun-button.png differ diff --git a/docs/modules/_scriptless-automation/images/scriptless-rerun-configurations-device-bundles-context.png b/docs/modules/_scriptless-automation/images/scriptless-rerun-configurations-device-bundles-context.png new file mode 100644 index 000000000..6814e3a7f Binary files /dev/null and b/docs/modules/_scriptless-automation/images/scriptless-rerun-configurations-device-bundles-context.png differ diff --git a/docs/modules/_scriptless-automation/images/scriptless-select-device-bundles.png b/docs/modules/_scriptless-automation/images/scriptless-select-device-bundles.png new file mode 100644 index 000000000..05bf375a5 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/scriptless-select-device-bundles.png differ diff --git a/docs/modules/_scriptless-automation/images/scriptless-unsupported-action.png b/docs/modules/_scriptless-automation/images/scriptless-unsupported-action.png new file mode 100644 index 000000000..6a17fff69 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/scriptless-unsupported-action.png differ diff --git a/docs/modules/_scriptless-automation/images/search-for-a-session-closeup.png b/docs/modules/_scriptless-automation/images/search-for-a-session-closeup.png new file mode 100644 index 000000000..76ba34d72 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/search-for-a-session-closeup.png differ diff --git 
a/docs/modules/_scriptless-automation/images/select-annotate-closeup.png b/docs/modules/_scriptless-automation/images/select-annotate-closeup.png new file mode 100644 index 000000000..d43508f0d Binary files /dev/null and b/docs/modules/_scriptless-automation/images/select-annotate-closeup.png differ diff --git a/docs/modules/_scriptless-automation/images/select-automation-test-case-context.png b/docs/modules/_scriptless-automation/images/select-automation-test-case-context.png new file mode 100644 index 000000000..e64da5875 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/select-automation-test-case-context.png differ diff --git a/docs/modules/_scriptless-automation/images/select-devices-context copy.png b/docs/modules/_scriptless-automation/images/select-devices-context copy.png new file mode 100644 index 000000000..11fce7c01 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/select-devices-context copy.png differ diff --git a/docs/modules/_scriptless-automation/images/select-devices-context.png b/docs/modules/_scriptless-automation/images/select-devices-context.png new file mode 100644 index 000000000..59393ac4d Binary files /dev/null and b/docs/modules/_scriptless-automation/images/select-devices-context.png differ diff --git a/docs/modules/_scriptless-automation/images/select-x-to-end-session-context.png b/docs/modules/_scriptless-automation/images/select-x-to-end-session-context.png new file mode 100644 index 000000000..f41a6575d Binary files /dev/null and b/docs/modules/_scriptless-automation/images/select-x-to-end-session-context.png differ diff --git a/docs/modules/_scriptless-automation/images/send-postman.png b/docs/modules/_scriptless-automation/images/send-postman.png new file mode 100644 index 000000000..704596e2e Binary files /dev/null and b/docs/modules/_scriptless-automation/images/send-postman.png differ diff --git a/docs/modules/_scriptless-automation/images/test-run-view.png 
b/docs/modules/_scriptless-automation/images/test-run-view.png new file mode 100644 index 000000000..ff26b050e Binary files /dev/null and b/docs/modules/_scriptless-automation/images/test-run-view.png differ diff --git a/docs/modules/_scriptless-automation/images/text-assertion.png b/docs/modules/_scriptless-automation/images/text-assertion.png new file mode 100644 index 000000000..b1fde46df Binary files /dev/null and b/docs/modules/_scriptless-automation/images/text-assertion.png differ diff --git a/docs/modules/_scriptless-automation/images/ui-remediation-window.png b/docs/modules/_scriptless-automation/images/ui-remediation-window.png new file mode 100644 index 000000000..de3385a3b Binary files /dev/null and b/docs/modules/_scriptless-automation/images/ui-remediation-window.png differ diff --git a/docs/modules/_scriptless-automation/images/ui-remediation2.png b/docs/modules/_scriptless-automation/images/ui-remediation2.png new file mode 100644 index 000000000..d4c8875c4 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/ui-remediation2.png differ diff --git a/docs/modules/_scriptless-automation/images/unsupported-action-automated-test-case.png b/docs/modules/_scriptless-automation/images/unsupported-action-automated-test-case.png new file mode 100644 index 000000000..195878466 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/unsupported-action-automated-test-case.png differ diff --git a/docs/modules/_scriptless-automation/images/unsupported-action-session-explorer.png b/docs/modules/_scriptless-automation/images/unsupported-action-session-explorer.png new file mode 100644 index 000000000..ccbc1ba51 Binary files /dev/null and b/docs/modules/_scriptless-automation/images/unsupported-action-session-explorer.png differ diff --git a/docs/modules/_test-management/images/annotate-warning-context.png b/docs/modules/_test-management/images/annotate-warning-context.png new file mode 100644 index 000000000..8994d8cf4 Binary files 
/dev/null and b/docs/modules/_test-management/images/annotate-warning-context.png differ diff --git a/docs/modules/_test-management/images/bin-icon.png b/docs/modules/_test-management/images/bin-icon.png new file mode 100644 index 000000000..2c3d88321 Binary files /dev/null and b/docs/modules/_test-management/images/bin-icon.png differ diff --git a/docs/modules/_test-management/images/convert-to-test-case-context.png b/docs/modules/_test-management/images/convert-to-test-case-context.png new file mode 100644 index 000000000..e0dc2b150 Binary files /dev/null and b/docs/modules/_test-management/images/convert-to-test-case-context.png differ diff --git a/docs/modules/_test-management/images/data-driven-testing.png b/docs/modules/_test-management/images/data-driven-testing.png new file mode 100644 index 000000000..f9779a5cf Binary files /dev/null and b/docs/modules/_test-management/images/data-driven-testing.png differ diff --git a/docs/modules/_test-management/images/define-new-dataset.png b/docs/modules/_test-management/images/define-new-dataset.png new file mode 100644 index 000000000..b7f2723ea Binary files /dev/null and b/docs/modules/_test-management/images/define-new-dataset.png differ diff --git a/docs/modules/_test-management/images/delete-test-session.png b/docs/modules/_test-management/images/delete-test-session.png new file mode 100644 index 000000000..7fae55768 Binary files /dev/null and b/docs/modules/_test-management/images/delete-test-session.png differ diff --git a/docs/modules/_test-management/images/delete-test-step-popup-context.png b/docs/modules/_test-management/images/delete-test-step-popup-context.png new file mode 100644 index 000000000..4f1e93bb7 Binary files /dev/null and b/docs/modules/_test-management/images/delete-test-step-popup-context.png differ diff --git a/docs/modules/_test-management/images/delete-test-steps-context.png b/docs/modules/_test-management/images/delete-test-steps-context.png new file mode 100644 index 
000000000..570b96948 Binary files /dev/null and b/docs/modules/_test-management/images/delete-test-steps-context.png differ diff --git a/docs/modules/_test-management/images/export-appium.png b/docs/modules/_test-management/images/export-appium.png new file mode 100644 index 000000000..3c0e65db0 Binary files /dev/null and b/docs/modules/_test-management/images/export-appium.png differ diff --git a/docs/modules/_test-management/images/remediation-ignore-action.png b/docs/modules/_test-management/images/remediation-ignore-action.png new file mode 100644 index 000000000..475d9151e Binary files /dev/null and b/docs/modules/_test-management/images/remediation-ignore-action.png differ diff --git a/docs/modules/_test-management/images/run-scriptless.png b/docs/modules/_test-management/images/run-scriptless.png new file mode 100644 index 000000000..cfbbd4c2f Binary files /dev/null and b/docs/modules/_test-management/images/run-scriptless.png differ diff --git a/docs/modules/_test-management/images/search-for-a-session-context.png b/docs/modules/_test-management/images/search-for-a-session-context.png new file mode 100644 index 000000000..260f0c5e9 Binary files /dev/null and b/docs/modules/_test-management/images/search-for-a-session-context.png differ diff --git a/docs/modules/_test-management/images/select-a-session-context.png b/docs/modules/_test-management/images/select-a-session-context.png new file mode 100644 index 000000000..de9e1e57d Binary files /dev/null and b/docs/modules/_test-management/images/select-a-session-context.png differ diff --git a/docs/modules/_test-management/images/select-annotate-closeup.png b/docs/modules/_test-management/images/select-annotate-closeup.png new file mode 100644 index 000000000..d43508f0d Binary files /dev/null and b/docs/modules/_test-management/images/select-annotate-closeup.png differ diff --git a/docs/modules/_test-management/images/select-automated-test-case-context.png 
b/docs/modules/_test-management/images/select-automated-test-case-context.png new file mode 100644 index 000000000..446bc9550 Binary files /dev/null and b/docs/modules/_test-management/images/select-automated-test-case-context.png differ diff --git a/docs/modules/_test-management/images/select-test-case.png b/docs/modules/_test-management/images/select-test-case.png new file mode 100644 index 000000000..3c5a22959 Binary files /dev/null and b/docs/modules/_test-management/images/select-test-case.png differ diff --git a/docs/modules/_test-management/images/select-test-management.png b/docs/modules/_test-management/images/select-test-management.png new file mode 100644 index 000000000..bcb35904b Binary files /dev/null and b/docs/modules/_test-management/images/select-test-management.png differ diff --git a/docs/modules/_test-management/images/set-new-value.png b/docs/modules/_test-management/images/set-new-value.png new file mode 100644 index 000000000..e47c784a5 Binary files /dev/null and b/docs/modules/_test-management/images/set-new-value.png differ diff --git a/docs/modules/_test-management/images/test-case-management-context.png b/docs/modules/_test-management/images/test-case-management-context.png new file mode 100644 index 000000000..108aa8389 Binary files /dev/null and b/docs/modules/_test-management/images/test-case-management-context.png differ diff --git a/docs/modules/_test-management/images/test-case-ready-to-go-context.png b/docs/modules/_test-management/images/test-case-ready-to-go-context.png new file mode 100644 index 000000000..0b4ba4d26 Binary files /dev/null and b/docs/modules/_test-management/images/test-case-ready-to-go-context.png differ diff --git a/docs/modules/_test-management/images/test-execution-plan.png b/docs/modules/_test-management/images/test-execution-plan.png new file mode 100644 index 000000000..6e0de16a6 Binary files /dev/null and b/docs/modules/_test-management/images/test-execution-plan.png differ diff --git 
a/docs/modules/_test-management/images/view-test-case.png b/docs/modules/_test-management/images/view-test-case.png new file mode 100644 index 000000000..9f80af9a6 Binary files /dev/null and b/docs/modules/_test-management/images/view-test-case.png differ diff --git a/docs/modules/_test-management/nav.adoc b/docs/modules/_test-management/nav.adoc new file mode 100644 index 000000000..3cf94d9fb --- /dev/null +++ b/docs/modules/_test-management/nav.adoc @@ -0,0 +1,9 @@ +.xref:index.adoc[] +* xref:_test-management:create-a-test-case.adoc[] +* xref:_test-management:manage-test-steps.adoc[] + +* xref:_test-management:remediation/index.adoc[] +** xref:_test-management:remediation/annotate-a-test-step.adoc[] +** xref:_test-management:remediation/crash-remediation.adoc[] +** xref:_test-management:remediation/element-selection-remediation.adoc[] +** xref:_test-management:remediation/ignore-remediation.adoc[] diff --git a/docs/modules/test-management/pages/create-a-test-case.adoc b/docs/modules/_test-management/pages/create-a-test-case.adoc similarity index 84% rename from docs/modules/test-management/pages/create-a-test-case.adoc rename to docs/modules/_test-management/pages/create-a-test-case.adoc index 8d3c434b8..d2fbd85a6 100644 --- a/docs/modules/test-management/pages/create-a-test-case.adoc +++ b/docs/modules/_test-management/pages/create-a-test-case.adoc @@ -1,13 +1,13 @@ = Create a test case :navtitle: Create a test case -Learn how to create a test case so you can run xref:scriptless-automation:index.adoc[Scriptless Automation]. +Learn how to create a test case so you can run xref:_scriptless-automation:index.adoc[Scriptless Automation]. == Before you start You'll need to complete the following: -* xref:scriptless-automation:create-a-baseline-session.adoc[Create a baseline session]. +* xref:_scriptless-automation:create-a-baseline-session.adoc[Create a baseline session]. * If applicable, xref:remediation/annotate-a-test-step.adoc[annotate any flagged test steps]. 
== Create a test case diff --git a/docs/modules/_test-management/pages/index.adoc b/docs/modules/_test-management/pages/index.adoc new file mode 100644 index 000000000..6b7ec4b55 --- /dev/null +++ b/docs/modules/_test-management/pages/index.adoc @@ -0,0 +1,15 @@ += Test management +:navtitle: Test management +:page-aliases: test-management:validation:index.adoc, test-management:validation:color-text-validation.adoc, test-management:validation:performance-validation.adoc, test-management:validation:text-validation.adoc + +NOTE: Text color validation, performance validation, and text validation are no longer supported. + +Manage test cases for you or your team. + +image:_test-management:test-case-management-context.png[width=1000, alt="The test management context"] + +== In this section + +include::../nav.adoc[lines=2..] + + diff --git a/docs/modules/test-management/pages/manage-test-steps.adoc b/docs/modules/_test-management/pages/manage-test-steps.adoc similarity index 89% rename from docs/modules/test-management/pages/manage-test-steps.adoc rename to docs/modules/_test-management/pages/manage-test-steps.adoc index 3b3b6f72b..f66bf9a33 100644 --- a/docs/modules/test-management/pages/manage-test-steps.adoc +++ b/docs/modules/_test-management/pages/manage-test-steps.adoc @@ -5,7 +5,7 @@ Learn how to manage the test steps, so you can improve your test case before run == Before you start -You'll need to xref:test-management:create-a-test-case.adoc[create a test case] from a baseline session. +You'll need to xref:_test-management:create-a-test-case.adoc[create a test case] from a baseline session. 
== Open a test case diff --git a/docs/modules/test-management/pages/remediation/annotate-a-test-step.adoc b/docs/modules/_test-management/pages/remediation/annotate-a-test-step.adoc similarity index 72% rename from docs/modules/test-management/pages/remediation/annotate-a-test-step.adoc rename to docs/modules/_test-management/pages/remediation/annotate-a-test-step.adoc index c76d6c5ae..795f61aa8 100644 --- a/docs/modules/test-management/pages/remediation/annotate-a-test-step.adoc +++ b/docs/modules/_test-management/pages/remediation/annotate-a-test-step.adoc @@ -1,11 +1,11 @@ = Annotate a test step for Scriptless Automation :navtitle: Annotate a test step -Learn how to annotate a test step from your xref:scriptless-automation:create-a-baseline-session.adoc[baseline session], so you can... +Learn how to annotate a test step from your xref:_scriptless-automation:create-a-baseline-session.adoc[baseline session], so you can... == Before you start -You'll need to xref:scriptless-automation:create-a-baseline-session.adoc[create a baseline session]. +You'll need to xref:_scriptless-automation:create-a-baseline-session.adoc[create a baseline session]. 
== Annotate a test step @@ -33,5 +33,5 @@ image:test-case-ready-to-go-context.png[width=1000,alt="Test cases are ready to These are the different options for annotating a test step: -** xref:test-management:remediation/crash-remediation.adoc[] -** xref:test-management:remediation/element-selection-remediation.adoc[] +** xref:_test-management:remediation/crash-remediation.adoc[] +** xref:_test-management:remediation/element-selection-remediation.adoc[] diff --git a/docs/modules/test-management/pages/remediation/crash-remediation.adoc b/docs/modules/_test-management/pages/remediation/crash-remediation.adoc similarity index 100% rename from docs/modules/test-management/pages/remediation/crash-remediation.adoc rename to docs/modules/_test-management/pages/remediation/crash-remediation.adoc diff --git a/docs/modules/test-management/pages/remediation/element-selection-remediation.adoc b/docs/modules/_test-management/pages/remediation/element-selection-remediation.adoc similarity index 84% rename from docs/modules/test-management/pages/remediation/element-selection-remediation.adoc rename to docs/modules/_test-management/pages/remediation/element-selection-remediation.adoc index 54328d979..0a1862efe 100644 --- a/docs/modules/test-management/pages/remediation/element-selection-remediation.adoc +++ b/docs/modules/_test-management/pages/remediation/element-selection-remediation.adoc @@ -3,10 +3,10 @@ The User Interface (UI) of an element often changes across multiple devices. This can interrupt revisits on different devices because it's challenging to manage the appearance of an element through various device ratios, like 18:9, 17:9, and 16:9. But, with Scriptless Automation, you can easily handle these challenges through the UI without any scripting. 
-image:scriptless-automation:ui-remediation-window.png[width=700,alt="UI Remediation window"] +image:_scriptless-automation:ui-remediation-window.png[width=700,alt="UI Remediation window"] When revisiting on a Galaxy J5, a blocker is raised because a UI change covers and makes the **Login** button un-clickable. You can resolve this by manually selecting the **Login** button in the Scriptless Automation Remediation. This selection becomes the new 'baseline' for the test case, ensuring smooth execution. -image:scriptless-automation:ui-remediation2.png[width=700,alt="Another view of UI Remediation window"] +image:_scriptless-automation:ui-remediation2.png[width=700,alt="Another view of UI Remediation window"] In situations where different screen ratios crop a list view cell, making it undetectable, this would typically result in a failed test case with other “Record & Playback” technologies. However, with Scriptless Automation, you can select the correct list view cell and submit the remediation to continue the test. diff --git a/docs/modules/test-management/pages/remediation/ignore-remediation.adoc b/docs/modules/_test-management/pages/remediation/ignore-remediation.adoc similarity index 84% rename from docs/modules/test-management/pages/remediation/ignore-remediation.adoc rename to docs/modules/_test-management/pages/remediation/ignore-remediation.adoc index 2dc76053d..48bad559e 100644 --- a/docs/modules/test-management/pages/remediation/ignore-remediation.adoc +++ b/docs/modules/_test-management/pages/remediation/ignore-remediation.adoc @@ -3,7 +3,7 @@ If you come across a non-annotated remediation and are certain it's an application issue that needs fixing for the next release, you can use the *Ignore* option. This will exclude the execution from the Scriptless Automation process, so you won't face the remediation again. 
-image:test-management:remediation-ignore-action.png[width=500,alt="The Ignore option under Blocker remediation's available actions"] +image:_test-management:remediation-ignore-action.png[width=500,alt="The Ignore option under Blocker remediation's available actions"] [width="100%",cols="15%,85%",options="header"] |=== diff --git a/docs/modules/_test-management/pages/remediation/index.adoc b/docs/modules/_test-management/pages/remediation/index.adoc new file mode 100644 index 000000000..458435d62 --- /dev/null +++ b/docs/modules/_test-management/pages/remediation/index.adoc @@ -0,0 +1,11 @@ += Remediation +:navtitle: Remediation + +Remediate your test cases before running Scriptless Automation. + +== In this section + +* xref:_test-management:remediation/annotate-a-test-step.adoc[] +* xref:_test-management:remediation/crash-remediation.adoc[] +* xref:_test-management:remediation/element-selection-remediation.adoc[] +* xref:_test-management:remediation/ignore-remediation.adoc[] diff --git a/docs/modules/automation-testing/pages/scripting/auto-generate-an-appium-script.adoc b/docs/modules/automation-testing/pages/scripting/auto-generate-an-appium-script.adoc index dc35751b0..6f4f564c6 100644 --- a/docs/modules/automation-testing/pages/scripting/auto-generate-an-appium-script.adoc +++ b/docs/modules/automation-testing/pages/scripting/auto-generate-an-appium-script.adoc @@ -5,7 +5,7 @@ Learn how to generate an Appium script from a manual session, so you can start b == Before you start -* xref:scriptless-automation:create-a-baseline-session.adoc[] +* xref:_scriptless-automation:create-a-baseline-session.adoc[] [#_export_your_script] == Export your script @@ -14,7 +14,7 @@ After creating the baseline session, search for your session, open the session o image:automation-testing:export-appium-script-context.png[width=1200, alt="The automated test case for the selected session."] -Next, select the *Export Appium Script* icon. 
If you don't see an icon, check if your session xref:test-management:remediation/index.adoc[requires remediation]. +Next, select the *Export Appium Script* icon. If you don't see an icon, check if your session xref:_test-management:remediation/index.adoc[requires remediation]. image:automation-testing:export-appium-script-closeup.png[width=750, alt="Session overview tabs, including Automated Test Case, Jira Integration, Rerun, and the Appium Export icon."] diff --git a/docs/modules/organization/pages/device-bundles/create-a-device-bundle.adoc b/docs/modules/organization/pages/device-bundles/create-a-device-bundle.adoc index 7e6f9d526..bcff5bb54 100644 --- a/docs/modules/organization/pages/device-bundles/create-a-device-bundle.adoc +++ b/docs/modules/organization/pages/device-bundles/create-a-device-bundle.adoc @@ -1,7 +1,7 @@ = Create a device bundle :navtitle: Create a device bundle -Learn how to create a new device bundle so users in your organization can run xref:scriptless-automation:index.adoc[Scriptless Automation] on devices in parallel. +Learn how to create a new device bundle so users in your organization can run xref:_scriptless-automation:index.adoc[Scriptless Automation] on devices in parallel. [NOTE] Device bundles are operating system (OS) specific so Android and iOS devices cannot be bundled together. diff --git a/docs/modules/organization/pages/device-bundles/manage-device-bundles.adoc b/docs/modules/organization/pages/device-bundles/manage-device-bundles.adoc index 7fc90802e..be0e64e18 100644 --- a/docs/modules/organization/pages/device-bundles/manage-device-bundles.adoc +++ b/docs/modules/organization/pages/device-bundles/manage-device-bundles.adoc @@ -1,7 +1,7 @@ = Manage device bundles :navtitle: Manage device bundles -Learn how to manage the device bundles in your organization so users can use them to run xref:scriptless-automation:index.adoc[Scriptless Automation] on devices in parallel. 
You can only manage device bundles xref:device-bundles/create-a-device-bundle.adoc[you or your team created]. +Learn how to manage the device bundles in your organization so users can use them to run xref:_scriptless-automation:index.adoc[Scriptless Automation] on devices in parallel. You can only manage device bundles xref:device-bundles/create-a-device-bundle.adoc[you or your team created]. == View a device bundle diff --git a/docs/modules/reporting/pages/usage-report/report-metadata.adoc b/docs/modules/reporting/pages/usage-report/report-metadata.adoc index 4f1e7df3a..08b866162 100644 --- a/docs/modules/reporting/pages/usage-report/report-metadata.adoc +++ b/docs/modules/reporting/pages/usage-report/report-metadata.adoc @@ -44,23 +44,23 @@ xref:manual-testing:start-a-mixed-session.adoc[Mixed test sessions]. === Scriptless on Manual -xref:scriptless-automation:index.adoc[Scriptless test sessions] that use a manual test session as the base. +xref:_scriptless-automation:index.adoc[Scriptless test sessions] that use a manual test session as the base. === Scriptless on Automation -xref:scriptless-automation:index.adoc[Scriptless test sessions] that use an automation test session as the base. +xref:_scriptless-automation:index.adoc[Scriptless test sessions] that use an automation test session as the base. === Test Case -xref:test-management:index.adoc[Test cases]. +xref:_test-management:index.adoc[Test cases]. === Text Validation -xref:test-management:validation/text-validation.adoc[Text validation] within a Scriptless session. +xref:_test-management:validation/text-validation.adoc[Text validation] within a Scriptless session. === Color Validation -xref:test-management:validation/color-text-validation.adoc[Color text validation] within a Scriptless session. +xref:_test-management:validation/color-text-validation.adoc[Color text validation] within a Scriptless session. 
=== Font Size (WBI) diff --git a/docs/modules/scriptless-automation/nav.adoc b/docs/modules/scriptless-automation/nav.adoc index 74637104c..80e4f8d7a 100644 --- a/docs/modules/scriptless-automation/nav.adoc +++ b/docs/modules/scriptless-automation/nav.adoc @@ -1,13 +1,6 @@ .xref:index.adoc[] -* xref:scriptless-automation:create-a-baseline-session.adoc[] - -* Get a session ID -** xref:get-a-session-id/using-the-kobiton-portal.adoc[] -** xref:get-a-session-id/using-the-kobiton-api.adoc[] - -* xref:run-scriptless-with-the-kobiton-portal.adoc[] -* xref:run-scriptless-with-the-kobiton-api.adoc[] -* xref:automation-testing:scripting/auto-generate-an-appium-script.adoc[] -* xref:scriptless-best-practices.adoc[] -* xref:scriptless-requirements.adoc[] +* xref:scriptless-automation:baseline-session.adoc[] +* xref:scriptless-automation:run-scriptless.adoc[] +* xref:scriptless-automation:run-scriptless-api.adoc[] +* xref:scriptless-automation:best-practices.adoc[] diff --git a/docs/modules/scriptless-automation/pages/baseline-session.adoc b/docs/modules/scriptless-automation/pages/baseline-session.adoc new file mode 100644 index 000000000..d9a424ec8 --- /dev/null +++ b/docs/modules/scriptless-automation/pages/baseline-session.adoc @@ -0,0 +1,79 @@ += Create a baseline session + +== Overview + +A baseline session is a manual test session that serves as the source for automated test execution. It captures how an application is exercised on a real device and provides the reference used to create a test case. + +Baseline sessions are created through manual interaction in the Kobiton platform and are required before a test can be run automatically. + +== What a baseline session is + +A baseline session is a manual session performed on a real device through the platform. + +During the session, a user interacts with the application as they would during normal testing. These interactions are recorded by the platform and later reused to define a test flow. 
+ +Once converted into a test case, the baseline session becomes the reference for future test execution. + +== Create a baseline session + +Baseline sessions are created by running a manual test session with capture enabled and completing a defined test flow. + +=== 1. Start a manual session and enable capture + +From the Devices view in the Kobiton platform, select a device and launch a manual session. + +Before interacting with the application, select the *Play* button to enable session capturing mode. + +Session capturing mode improves the accuracy of element detection by capturing precise element bounding boxes. Actions performed while capturing is enabled provide the most reliable results for scriptless execution. + +If your test requires an application, install or select the app at the start of the session. + +NOTE: Actions performed before capture is enabled are still recorded, but may result in reduced element accuracy during automated execution. + +=== 2. Perform the test flow + +Interact with the application as you would during normal testing. + +While capture is enabled, the platform records supported interactions and associates them with the current application state. These recorded interactions form the basis of the baseline session. + +Focus on completing a single, intentional test flow from start to finish. + +=== 3. End the session and convert it to a test case + +When the test flow is complete, exit the manual session by selecting the *X* icon. + +After the session ends, it appears in Session Explorer. From Session Explorer, the session can be converted into a test case. Once converted, the test case is managed through Test Management, where it can be reviewed, edited, and used to create test runs. + +For details, see xref:test-management/test-cases.adoc[Test cases]. 
+ +// Screenshots: +// Launching a manual session from Devices +// Session Explorer showing a completed session + +== What makes a good baseline + +Baseline sessions are most reliable when they reflect a clear and stable test flow. + +Good baseline sessions typically: + +* Follow a single, intentional path through the application +* Use supported interactions only +* Avoid unnecessary navigation or exploratory actions + +Baseline quality directly affects how reliably a test can be executed later. + +== Platform considerations + +Baseline sessions are platform-specific. + +Separate baseline sessions are required for Android and iOS, even when the application behavior is similar. + +Baseline sessions must target a supported application context: a web application (Safari or Chrome), an app installed from the App Repository, or an existing app already available on the device. + +Manual sessions can be converted into test cases even if unsupported actions were recorded. Test cases that contain unsupported actions cannot be executed in a test run until those steps are removed and the test case is saved as a new version. + +== Related pages + +* xref:scriptless-automation:run-scriptless.adoc[Run a Scriptless test] +* xref:test-management:index.adoc[Test management] +* xref:scriptless-automation:best-practices.adoc[Best practices for creating an effective baseline session] diff --git a/docs/modules/scriptless-automation/pages/best-practices.adoc b/docs/modules/scriptless-automation/pages/best-practices.adoc new file mode 100644 index 000000000..9524ef417 --- /dev/null +++ b/docs/modules/scriptless-automation/pages/best-practices.adoc @@ -0,0 +1,69 @@ += Best practices + +== Overview + +Best practices help improve the reliability and consistency of automated test execution created from manual sessions. + +These guidelines are not required to run tests, but following them can reduce execution issues and minimize the need for remediation as test coverage expands. 
+ +== Baseline session best practices + +Baseline sessions are most effective when they reflect a clear and stable test flow. + +When creating a baseline session: + +* Enable session capture by selecting the *Play* button before interacting with the application to ensure accurate element detection +* Follow a single, intentional path through the application +* Avoid exploratory actions or unnecessary navigation +* Use supported interactions only + +Baseline sessions that are focused and repeatable are easier to reuse and more reliable during execution. + +== Test design considerations + +Keeping tests concise improves maintainability. + +Shorter test flows are easier to understand, execute, and troubleshoot. Instead of extending a single test to cover many variations, related tests can be grouped and executed together. + +If a test requires frequent adjustments to continue running, recreating the baseline session may be more effective than repeatedly refining an unstable definition. + +== Execution consistency + +Consistency across devices and environments improves execution reliability. + +When possible: + +* Use similar device types and operating system versions +* Avoid mixing display modes within the same test run + +Greater variation across devices may increase the likelihood of execution differences that require review. + +== Scaling test execution + +Tests can be executed across many devices once a baseline has proven reliable. + +Expanding execution incrementally allows issues to surface early and keeps results easier to interpret. Grouping related tests helps maintain clarity as execution scales. + +== Reducing remediation effort + +Remediation is an expected part of automated execution, but its frequency can often be reduced. + +Clear baseline sessions, consistent navigation paths, and stable application states all contribute to smoother execution. 
Repeated remediation across many steps may indicate that the underlying test flow no longer reflects current application behavior. + +== Known limitations + +Some workflows are not well suited for this testing approach. + +Examples include: + +* Authentication flows that rely on one-time credentials +* Highly dynamic interfaces that change on each launch +* Complex gesture-based interactions + +In these cases, script-based automation may provide greater control. + +== Related pages + +* xref:scriptless-automation:baseline-session.adoc[Create a baseline session] +* xref:scriptless-automation:run-scriptless.adoc[Run a Scriptless test] +* xref:test-management:remediations.adoc[Remediations] diff --git a/docs/modules/scriptless-automation/pages/index.adoc b/docs/modules/scriptless-automation/pages/index.adoc index b7a14981e..bb44ca020 100644 --- a/docs/modules/scriptless-automation/pages/index.adoc +++ b/docs/modules/scriptless-automation/pages/index.adoc @@ -1,10 +1,62 @@ -= Scriptless Automation -:navtitle: Scriptless Automation += Scriptless automation -Automate your tests without writing scripts. +== Overview -image:automation-testing:automation-testing-index-context.png[width=1000,alt="A context to Scriptless Automation"] +Scriptless automation enables automated test execution based on interactions recorded during manual sessions. Tests are created from real device usage and can be executed repeatedly without requiring users to write or maintain automation scripts. -== In this section +Within the Kobiton platform, this capability builds on manual testing by capturing user interactions in a structured form and reusing them for repeatable execution across devices. -include::../nav.adoc[lines=2..] +Scriptless automation is used to reduce the effort required to run repetitive tests while preserving visibility into how tests are executed on real devices. + +== How it works + +Scriptless automation is based on recording test behavior once and reusing it for execution. 
+ +=== Manual sessions as the source + +Testing begins with a manual session performed on a real device through the Kobiton platform. During the session, user interactions with the application are recorded. + +=== Tests created from recorded interactions + +Recorded interactions are converted into test definitions that represent a complete test flow. These definitions are stored and managed independently of execution. + +=== Tests executed without user-written scripts + +When a test is run, the platform executes the recorded interactions automatically. Automation scripts are generated and managed by the platform; users do not need to author or maintain code to run tests. + +== Requirements + +Scriptless automation requires the following: + +* An active Kobiton account with access to Test Management +* A supported mobile platform (Android or iOS) +* A supported application context, as described below + +=== Application under test + +The application under test must be accessible in a form supported for Scriptless automation. + +Depending on the application type: + +* *Native or hybrid mobile apps* must be uploaded to the Kobiton App Repository before creating or running tests. Direct app upload during test execution is not supported. +* *Web applications* can be tested through supported mobile browsers without uploading an app. +* *Preinstalled system applications* already available on the device (such as Clock or Calculator) can be tested without additional setup. + +== Limitations + +Scriptless automation tests are platform-specific. Separate tests are required for Android and iOS. 
+ +Some application types and flows are not supported or are a poor fit for Scriptless automation, including: + +* Game engine–based applications, such as Unity-based apps +* Canvas-based applications, where UI elements are not represented as standard view hierarchies +* Highly dynamic interfaces that change structure frequently at runtime +* Authentication flows that rely on one-time credentials, such as OTP or multi-factor authentication + +In these scenarios, script-based automation or alternative testing approaches may be more appropriate. + +== Related pages + +* xref:scriptless-automation:baseline-session.adoc[Create a baseline session] +* xref:scriptless-automation:run-scriptless.adoc[Run a Scriptless test] +* xref:test-management:index.adoc[Test management] diff --git a/docs/modules/scriptless-automation/pages/run-scriptless-api.adoc b/docs/modules/scriptless-automation/pages/run-scriptless-api.adoc new file mode 100644 index 000000000..7e163dec0 --- /dev/null +++ b/docs/modules/scriptless-automation/pages/run-scriptless-api.adoc @@ -0,0 +1,45 @@ += Run a Scriptless test using the API + +== Overview + +Tests created from recorded manual sessions can also be executed programmatically using the Kobiton API. + +API-based execution is typically used for advanced workflows, such as integrating test runs into automated pipelines or triggering tests as part of a larger system. The API provides an alternative to running tests through the platform UI. + +== When to use the API + +Using the API is appropriate when test execution needs to be: + +* Triggered automatically as part of a CI/CD pipeline +* Integrated with external systems or custom tooling +* Managed without manual interaction in the platform + +The API does not replace the platform UI. Test cases and test suites must still be created and managed through Test Management. + +== How API-based execution works + +API-based execution uses the same underlying test definitions as platform-based execution. 
+ +Tests are created from baseline sessions and stored in Test Management. When triggered through the API, these existing test definitions are executed on the selected devices using the same execution logic as platform-initiated runs. + +Execution results are recorded in Test Management and can be reviewed in the platform UI. + +== Requirements + +To run tests using the API, the following are required: + +* An active Kobiton account with API access +* Existing test cases or test suites +* Familiarity with API-based workflows + +== API documentation + +Detailed information about available endpoints, request formats, and authentication is provided in the API documentation. + +For implementation details, see the API documentation. + +== Related pages + +* xref:scriptless-automation:run-scriptless.adoc[Run a Scriptless test in the platform] +* xref:test-management:index.adoc[Test management] +* xref:scriptless-automation:baseline-session.adoc[Create a baseline session] diff --git a/docs/modules/scriptless-automation/pages/run-scriptless.adoc b/docs/modules/scriptless-automation/pages/run-scriptless.adoc new file mode 100644 index 000000000..bc834b1e2 --- /dev/null +++ b/docs/modules/scriptless-automation/pages/run-scriptless.adoc @@ -0,0 +1,61 @@ += Run a Scriptless test + +== Overview + +Running a Scriptless test applies an existing test definition to one or more devices. Tests are executed automatically based on interactions previously recorded during a manual session. + +In the Kobiton platform, Scriptless tests are run through Test Management. Execution results are captured for review and can be rerun as needed. 
+ +== Before you begin + +Before running a test, ensure the following: + +* A test case has been created from a baseline session +* The application under test is available in a supported application context (native app, web application, or preinstalled app) +* At least one compatible device is available + +== Run a test in the platform + +Tests are run from Test Management in the Kobiton platform. + +From Test Management, users can create a test run using a single test case or a test suite. During setup, the test definition, application version, and target devices are selected. + +Once a test run starts, the platform executes the recorded interactions automatically on the selected devices. Test execution does not require user interaction while the run is in progress. + +== What happens during execution + +During execution, the platform replays the recorded interactions step by step on each device. + +For each step, the system attempts to match the recorded interaction to the current application state. Execution continues until all steps are completed or until user action is required. + +Execution progress and status are visible while the test is running. + +== Review test results + +After execution completes, results are available in Test Management. + +Each test run includes: + +* Overall execution status +* Device-level execution details +* Validation results, such as Crash, Accessibility, and Response Time findings + +If execution cannot proceed for a particular step, the test run is marked as requiring action and must be remediated before execution can continue. + +== Rerun a test + +Test runs can be rerun using the same configuration. + +Reruns repeat the original execution using the same test definition and device selection. This is commonly used after issues are addressed or when validating execution behavior. + +== Advanced usage + +Tests can also be triggered programmatically using the Kobiton API. 
This approach is typically used for automated pipelines or advanced workflows. + +For details, see the link:https://api.kobiton.com/docs[API documentation]. + +== Related pages + +* xref:scriptless-automation:baseline-session.adoc[Create a baseline session] +* xref:test-management:index.adoc[Test management] +* xref:test-management:remediations.adoc[Remediations] diff --git a/docs/modules/test-management/nav.adoc b/docs/modules/test-management/nav.adoc index 592843753..f63036424 100644 --- a/docs/modules/test-management/nav.adoc +++ b/docs/modules/test-management/nav.adoc @@ -1,9 +1,8 @@ .xref:index.adoc[] -* xref:test-management:create-a-test-case.adoc[] -* xref:test-management:manage-test-steps.adoc[] -* xref:test-management:remediation/index.adoc[] -** xref:test-management:remediation/annotate-a-test-step.adoc[] -** xref:test-management:remediation/crash-remediation.adoc[] -** xref:test-management:remediation/element-selection-remediation.adoc[] -** xref:test-management:remediation/ignore-remediation.adoc[] +* xref:test-management:test-cases.adoc[] +* xref:test-management:generate-appium.adoc[] +* xref:test-management:test-runs.adoc[] +* xref:test-management:test-suites.adoc[] +* xref:test-management:test-reruns.adoc[] +* xref:test-management:remediations.adoc[] diff --git a/docs/modules/test-management/pages/generate-appium.adoc b/docs/modules/test-management/pages/generate-appium.adoc new file mode 100644 index 000000000..b3c4b309e --- /dev/null +++ b/docs/modules/test-management/pages/generate-appium.adoc @@ -0,0 +1,54 @@ += Generate an Appium script + +== Overview + +An Appium script can be generated from an existing test case created from a baseline session. + +The generated script reflects the recorded interactions captured during the manual session and can be used in external, script-based automation workflows. + +Generating an Appium script is optional. Running tests in the platform does not require exporting or maintaining scripts. 
+ +== Before you begin + +Before generating an Appium script: + +* A baseline session must be completed +* The baseline session must be converted into a test case +* Any required remediation must be resolved + +== Export an Appium script + +Appium scripts are generated from the test case view in the platform. + +From the test case: + +* Open the test case associated with the baseline session +* Select the option to export an Appium script +* Choose a supported language and test framework +* Download the generated script + +The exported script includes the recorded actions and element interactions captured during the baseline session. + +// Screenshots: +// One or two images showing the export option and language/framework selection + +== Supported languages and frameworks + +Appium scripts can be generated using the following language and framework combinations: + +* Java (TestNG) +* Java (JUnit) +* Node.js (Mocha) +* C# (NUnit) + +== Using the generated script + +The generated Appium script can be extended, modified, or integrated into existing automation frameworks. + +Changes made to the script outside the platform do not affect the original test case or future test runs executed in the platform. 
+ +== Related pages + +* xref:test-management:test-cases.adoc[Test cases] +* xref:scriptless-automation:baseline-session.adoc[Create a baseline session] +* xref:test-management:index.adoc[Test management] diff --git a/docs/modules/test-management/pages/index.adoc b/docs/modules/test-management/pages/index.adoc index e3bc6a958..9b417c12c 100644 --- a/docs/modules/test-management/pages/index.adoc +++ b/docs/modules/test-management/pages/index.adoc @@ -1,15 +1,52 @@ -= Test management -:navtitle: Test management -:page-aliases: test-management:validation:index.adoc, test-management:validation:color-text-validation.adoc, test-management:validation:performance-validation.adoc, test-management:validation:text-validation.adoc += Test Management -NOTE: Text color validation, performance validation, and text validation are no longer supported. +== Overview -Manage test cases for you or your team. +Test Management provides the structure used to organize, execute, and review automated tests created from manual sessions. -image:test-management:test-case-management-context.png[width=1000, alt="The test management context"] +Within the Kobiton platform, Test Management is where test cases are stored, grouped, and reused. It also provides visibility into test execution and results across devices and application versions. -== In this section +== How Test Management fits into testing -include::../nav.adoc[lines=2..] +Test Management separates test definition from test execution. +Test cases represent recorded test flows derived from baseline sessions. These test cases can be executed repeatedly without recreating the original session. +Each execution produces a test run, which records the outcome of running a test case or test suite on one or more devices. Test runs and their results are tracked independently of the test definitions they use. + +== Core objects + +Test Management is built around a small set of core objects. 
+ +=== Test cases + +Reusable definitions created from baseline sessions. Test cases describe what should be tested. + +=== Test runs + +Individual executions of a test case or test suite. Test runs capture results and execution details. + +=== Test suites + +Collections of related test cases grouped for execution and organization. + +=== Test reruns + +Repeat executions of a previous test run using the same configuration, typically used for validation. + +== What’s next + +From Test Management, users can: + +* Create and manage test cases +* Run tests across devices +* Review execution results +* Rerun tests after remediation +* Generate Appium scripts for automation testing + +== Related pages + +* xref:test-management:test-cases.adoc[Test cases] +* xref:test-management:test-runs.adoc[Test runs] +* xref:test-management:test-suites.adoc[Test suites] +* xref:test-management:test-reruns.adoc[Test reruns] diff --git a/docs/modules/test-management/pages/remediation/index.adoc b/docs/modules/test-management/pages/remediation/index.adoc deleted file mode 100644 index 294d55ebd..000000000 --- a/docs/modules/test-management/pages/remediation/index.adoc +++ /dev/null @@ -1,11 +0,0 @@ -= Remediation -:navtitle: Remediation - -Remediate your test cases before running Scriptless Automation. - -== In this section - -* xref:test-management:remediation/annotate-a-test-step.adoc[] -* xref:test-management:remediation/crash-remediation.adoc[] -* xref:test-management:remediation/element-selection-remediation.adoc[] -* xref:test-management:remediation/ignore-remediation.adoc[] diff --git a/docs/modules/test-management/pages/remediations.adoc b/docs/modules/test-management/pages/remediations.adoc new file mode 100644 index 000000000..981cebf73 --- /dev/null +++ b/docs/modules/test-management/pages/remediations.adoc @@ -0,0 +1,61 @@ += Remediations + +== Overview + +Remediation addresses situations where automated execution cannot proceed as recorded. 
+ +During execution, the platform attempts to match recorded interactions to the current application state on the device. When a step cannot be completed with sufficient confidence, execution pauses and user input is required before the test can continue. + +Remediation allows the test definition to be corrected without recreating the original baseline session. + +== When remediation is required + +When a test run encounters a blocker, its status indicates that user action is required. + +Remediation is triggered when an automated execution cannot reliably complete a recorded step. This typically occurs when a UI element cannot be identified due to layout changes, screen size differences, or other variations across devices. + +When this happens, execution is paused until the blocker is resolved. + +Remediation is an expected part of maintaining reliable automated execution, particularly when tests are run across multiple devices or application versions. + +== Remediate a test run + +When a test run encounters a blocker, its status reflects that action is required. + +From the test run results, users can review where execution stopped and access the associated session. Remediation updates the test case definition so that future executions can proceed past the blocked step. + +After remediation is submitted, the test can be rerun using the same configuration. + +// Screenshot suggestion: +// Test Run results view showing a blocked step and remediation entry point + +== Remediate in Session Explorer + +Session Explorer provides a detailed view of execution behavior. + +From Session Explorer, users can review the recorded baseline step alongside the execution attempt and select the correct UI element when a mismatch occurs. + +Remediation submitted through Session Explorer is applied to the test case and used in subsequent executions across devices. 
+ +// Screenshot suggestion: +// Session Explorer view showing baseline step vs execution attempt + +== Live remediation + +Live remediation is available in limited scenarios during active execution. + +Live remediation allows users to intervene while a test is running, correct a blocked step, and continue execution without terminating the session. This approach is useful when immediate validation is required. + +Availability and behavior may vary depending on execution context. + +== After remediation + +Remediation updates the test case definition. + +Once remediation is complete, rerunning the test validates that execution can proceed using the updated definition. Previous test runs remain unchanged and available for reference. + +== Related pages + +* xref:test-management:test-runs.adoc[Test runs] +* xref:test-management:test-reruns.adoc[Test reruns] +* xref:scriptless-automation:best-practices.adoc[Best practices] diff --git a/docs/modules/test-management/pages/test-cases.adoc b/docs/modules/test-management/pages/test-cases.adoc new file mode 100644 index 000000000..5555c0a35 --- /dev/null +++ b/docs/modules/test-management/pages/test-cases.adoc @@ -0,0 +1,86 @@ += Test cases + +== Overview + +A test case represents a reusable test flow created from a baseline session. + +Test cases store the recorded interactions captured during a manual session and serve as the definition used for automated execution. Once created, a test case can be run repeatedly without recreating the original session. + +== Create a test case + +Test cases are created from completed baseline sessions. + +A test case can be created in either of the following ways: + +* By converting a completed manual session from Session Explorer +* From the Test Case list in Test Management, where completed manual sessions are available for conversion + +In both cases, the test case is created from a recorded manual session. The recorded interactions from the baseline session define the test flow. 
+ +Converting a session to a test case unlocks additional capabilities, including running the test across devices and generating an Appium script. + +After conversion, the option to view the test case becomes available. Selecting this opens the Test Case details view, where the recorded steps and interactions can be reviewed and managed. + +Once created, the test case is available in Test Management for execution and further management. + +// Screenshot suggestion: +// Show the button text change from "Convert to Test Case" to "View Test Case" + +== Manage test cases + +Test cases are managed through Test Management in the Kobiton platform. + +=== Test Case list + +The Test Case list provides an overview of all test cases available in the account. + +From this view, users can locate test cases, review basic information, and open individual test cases for detailed review and management. + +=== Test Case details + +The Test Case details view displays the recorded steps captured from the baseline session. + +From this view, users can: + +* Review recorded test steps +* Remediate test steps when element selection requires correction +* Update test case metadata +* Assign the test case to one or more test suites +* Clone the test case +* Delete the test case +* Generate an Appium script + +Test case details reflect the current version of the test case. + +== Editing and versioning + +Changes to a test case create new versions. + +Versioning allows test cases to evolve as applications change while preserving previous definitions. Execution results remain associated with the version used at the time of execution. + +== Generate an Appium script + +=== What it is + +An Appium script can be generated from a test case created from a baseline session. + +From the Test Case details view, users can generate an Appium script based on the recorded interactions in the test case. 
This script represents the same test flow and can be used as a starting point for script-based automation. + +The option to generate an Appium script is available only for test cases created from recorded sessions. + +=== When it’s used + +Generated Appium scripts are typically used when test definitions need to be extended, customized, or integrated into script-based automation workflows. + +Using Scriptless automation does not require generating or maintaining Appium scripts. + +For supported languages and frameworks, see xref:test-management/generate-appium.adoc[Generate an Appium script]. + +// Screenshot: +// Appium UI is documented on the linked page, not here + +== Related pages + +* xref:scriptless-automation:baseline-session.adoc[Create a baseline session] +* xref:test-management:test-runs.adoc[Test runs] +* xref:test-management:test-suites.adoc[Test suites] diff --git a/docs/modules/test-management/pages/test-reruns.adoc b/docs/modules/test-management/pages/test-reruns.adoc new file mode 100644 index 000000000..1703719dd --- /dev/null +++ b/docs/modules/test-management/pages/test-reruns.adoc @@ -0,0 +1,44 @@ += Test reruns + +== Overview + +A test rerun is a repeat execution of a previous test run using the same configuration. + +Reruns are used to validate results without redefining the test or changing execution settings. Each rerun is recorded as a separate execution and does not replace the original test run. + +== When test reruns are used + +Test reruns are typically used after an issue has been addressed or when execution needs to be validated again. + +Common scenarios include: + +* Verifying execution after remediation +* Confirming behavior following an application update +* Repeating a test run to validate consistency + +Reruns ensure that results can be compared directly across executions. 
+ +// Screenshot suggestion: +// Test Run list showing a rerun entry with its name + +== How reruns differ from new test runs + +A rerun repeats a previous test run using the same test definition, devices, and configuration. + +Creating a new test run allows changes to be made to the test setup, such as selecting different devices or modifying execution parameters. + +Both approaches create independent execution records, but reruns preserve the original execution context. + +== Where reruns appear + +Reruns are managed through Test Management and appear as separate test runs. + +Each rerun is listed with its own name and execution details. The rerun name includes a reference to the original test run ID, which can be used to identify the source of the rerun. + +Reruns can be reviewed individually to evaluate execution results after remediation or validation. + +== Related pages + +* xref:test-management:test-runs.adoc[Test runs] +* xref:test-management:remediations.adoc[Remediations when execution requires user input] +* xref:test-management:test-cases.adoc[Test cases] diff --git a/docs/modules/test-management/pages/test-runs.adoc b/docs/modules/test-management/pages/test-runs.adoc new file mode 100644 index 000000000..3a0deedc6 --- /dev/null +++ b/docs/modules/test-management/pages/test-runs.adoc @@ -0,0 +1,49 @@ += Test runs + +== Overview + +A test run represents a single execution of a test case or test suite on one or more devices. + +Test runs apply an existing test definition to selected devices and record the outcome of that execution. Each test run is tracked independently and retains its own results and execution data. + +== Create a test run + +Test runs are created from Test Management. + +A test run can be created using either a single test case or a test suite. During setup, users select the test definition, application version, and target devices. + +Once started, the platform executes the recorded interactions automatically on the selected devices. 
No user interaction is required while the test run is in progress. + +== Manage test runs + +Test runs are managed through Test Management in the platform. + +=== Test Run list + +The Test Run list displays all test runs associated with the account. From this view, users can review execution status, identify completed or in-progress runs, and access detailed results. + +=== Test Run details + +The Test Run details view provides information about a specific execution, including the test definition used, target devices, and overall status. + +From this view, users can review execution outcomes and navigate to related execution sessions. + +== Execution status + +Each test run reflects the outcome of execution across devices. + +Common statuses indicate whether execution completed successfully, failed due to an application issue, or requires user action before it can continue. + +== Rerun a test run + +Test runs can be rerun using the same configuration. + +Reruns repeat the original execution with the same test definition, devices, and settings. This is commonly used after remediation or when validating execution behavior. + +Reruns do not replace the original test run. Each execution is recorded separately. + +== Related pages + +* xref:test-management:test-cases.adoc[Test cases] +* xref:test-management:test-suites.adoc[Test suites] +* xref:test-management:remediations.adoc[Remediations] \ No newline at end of file diff --git a/docs/modules/test-management/pages/test-suites.adoc b/docs/modules/test-management/pages/test-suites.adoc new file mode 100644 index 000000000..45797c6e1 --- /dev/null +++ b/docs/modules/test-management/pages/test-suites.adoc @@ -0,0 +1,50 @@ += Test suites + +== Overview + +A test suite is a collection of related test cases grouped for organization and execution. + +Test suites allow multiple test cases to be run together using a single configuration. 
They are commonly used to manage related test coverage and to scale execution across devices. + +== Create a test suite + +Test suites are created in Test Management. + +To create a test suite, users define the operating system, provide a name and optional description, and select the test cases to include. Test cases can be added to or removed from the suite as coverage evolves. + +Creating a test suite groups related test cases for execution but does not change the behavior of the individual test cases. Each test case retains its own definition and execution logic. + +After a test suite is created, it appears in the Test Suite list, where it can be run or managed. + +== Manage test suites + +Test suites are managed from the Test Suite list in Test Management. + +From the list, users can: + +* Run the test suite to create a test run +* Edit the suite name or description +* Update the set of test cases included +* Delete the test suite when it is no longer needed + +These actions are available from the suite’s action menu in the list view. + +// Screenshot suggestion: +// Show the Test Suite list with the action menu visible + +== Run a test suite + +When a test suite is run, a configuration window is displayed before execution begins. + +In this step, users review and define execution settings such as the application version, devices or device bundles, and allocation strategy. These settings control how the test cases in the suite are distributed and executed. + +After the configuration is confirmed, the test suite is executed and a test run is created. + +// Screenshot suggestion: +// Configuration window shown before execution + +== Related pages + +* xref:test-management:test-cases.adoc[Test cases] +* xref:test-management:test-runs.adoc[Test runs] +* xref:test-management:test-reruns.adoc[Test reruns]