diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..887a2c1
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# SCM syntax highlighting & preventing 3-way merges
+pixi.lock merge=binary linguist-language=YAML linguist-generated=true
diff --git a/.gitignore b/.gitignore
index 11ba12b..9f18c17 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,12 @@ yarn-error.log*
package-lock.json
stories
.storybook
+conf.py
+
+# pixi environments
+.pixi
+*.egg-info
+pixi.toml
+.gitattributes
+pixi.lock
+_build
diff --git a/piximi-documentation/_static/style.css b/piximi-documentation/_static/style.css
index f1f085e..5b152e0 100644
--- a/piximi-documentation/_static/style.css
+++ b/piximi-documentation/_static/style.css
@@ -1,49 +1,97 @@
.text-img {
- height: 1.5em;
- padding-right: 0.5em;
+ height: 1.5em;
+ padding-right: 0.5em;
}
.annotation-gif {
- box-shadow: rgba(45, 35, 66, 0.4) 0 4px 8px, rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
+ box-shadow: rgba(45, 35, 66, 0.4) 0 4px 8px,
+ rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
border-radius: 10px;
}
.img-shadow {
- box-shadow: rgba(45, 35, 66, 0.4) 0 4px 8px, rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
+ box-shadow: rgba(45, 35, 66, 0.4) 0 4px 8px,
+ rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
border-radius: 10px;
}
.piximi-btn {
- font-size: 1.5rem;
+ font-size: 1.25rem;
align-items: center;
- background-color: #FCFCFD;
border-radius: 8px;
- border-width: 0;
- box-shadow: rgba(45, 35, 66, 0.4) 0 2px 4px,rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
- color: black !important;
+ border: 1px solid #02aec5;
+ color: #02aec5 !important;
display: inline-flex;
margin-top: 1rem;
- margin-left: 2rem;
+ margin-inline: auto;
padding: 1rem;
text-align: center;
text-decoration: none;
- transition: box-shadow .15s,transform .15s;
+ transition: box-shadow 0.15s, transform 0.15s;
touch-action: manipulation;
- will-change: box-shadow,transform;
+ will-change: box-shadow, transform;
outline: none;
}
.piximi-btn:focus {
- box-shadow: #D6D6E7 0 0 0 1.5px inset, rgba(45, 35, 66, 0.4) 0 2px 4px, rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
+ box-shadow: #d6d6e7 0 0 0 1.5px inset, rgba(45, 35, 66, 0.4) 0 2px 4px,
+ rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
}
.piximi-btn:hover {
- box-shadow: rgba(45, 35, 66, 0.4) 0 4px 8px, rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
+ box-shadow: rgba(45, 35, 66, 0.4) 0 4px 8px,
+ rgba(45, 35, 66, 0.3) 0 7px 13px -3px;
transform: translateY(-2px);
text-decoration: none;
}
.piximi-btn:active {
- box-shadow: #D6D6E7 0 3px 7px inset;
+ box-shadow: #d6d6e7 0 3px 7px inset;
transform: translateY(2px);
-}
\ No newline at end of file
+}
+
+.horiz-button-group {
+ display: flex;
+ flex-direction: row;
+ justify-content: space-around;
+ gap: 1rem;
+}
+
+.theme-img {
+ display: none;
+}
+
+.content-img {
+ border-radius: 8px;
+ border: 1px solid #d1d5da;
+}
+
+/* Show light image only in light theme */
+html[data-theme="light"] .light-img {
+ display: block;
+}
+
+/* Show dark image only in dark theme */
+html[data-theme="dark"] .dark-img {
+ display: block;
+}
+
+.centered-stack {
+ width: 100%;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+ flex-direction: column;
+}
+
+.grid-text {
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ flex-grow: 1;
+ color: red;
+}
+
+.translate {
+ color: red;
+}
diff --git a/piximi-documentation/_toc.yml b/piximi-documentation/_toc.yml
index 195d220..0bc669c 100644
--- a/piximi-documentation/_toc.yml
+++ b/piximi-documentation/_toc.yml
@@ -1,28 +1,35 @@
format: jb-book
root: intro
parts:
-- caption: Piximi Views
- chapters:
- - file: main-menu
- - file: annotation-guide
- - file: segmentation
- - file: classification
- - file: measurements
-- caption: Tutorials
- chapters:
- - file: translocation_tutorial
- - file: translocation_tutorial_ES
- - file: translocation_tutorial_pt_BR
- - file: classify-example-eukaryotic-image
- - file: classify-example-eukaryotic-object
-- caption: How-to Guides
- chapters:
- - file: create-cell-crops-with-cellprofiler
- - file: PiximiConverter.ipynb
-- caption: Technical information
- chapters:
- - file: hyperparameters
- - file: technical-faq
- - file: work-in-progress
- - file: example-datasets
- - file: citing-piximi
+ - caption: Quick Start
+ chapters:
+ - file: quick-start
+ - caption: Piximi Views
+ chapters:
+ - file: pages/detail/projectviewer
+ sections:
+ - file: pages/detail/projectviewer-classification
+ - file: pages/detail/projectviewer-segmentation
+ - file: pages/detail/imageviewer
+ sections:
+ - file: pages/detail/imageviewer-tools-annotation
+ - file: pages/detail/measurements-viewer
+ - caption: Tutorials
+ chapters:
+ - file: pages/tutorial/translocation_tutorial
+ - file: pages/tutorial/translocation_tutorial_ES
+ - file: pages/tutorial/translocation_tutorial_pt_BR
+ - file: pages/tutorial/creating-measurements
+ - file: pages/tutorial/classify-example-eukaryotic-image
+ - file: pages/tutorial/segmentation-tutorial
+ - caption: How-to Guides
+ chapters:
+ - file: pages/how-to/create-cell-crops-with-cellprofiler
+ - file: pages/how-to/PiximiConverter.ipynb
+ - caption: Technical information
+ chapters:
+ - file: pages/technical/hyperparameters
+ - file: pages/technical/technical-faq
+ - file: pages/technical/work-in-progress
+ - file: pages/technical/example-datasets
+ - file: pages/technical/citing-piximi
diff --git a/piximi-documentation/classification.md b/piximi-documentation/classification.md
deleted file mode 100644
index dec9dde..0000000
--- a/piximi-documentation/classification.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Classification
-
-...Coming soon!...
-
-See our [image classification tutorial](./classify-example-eukaryotic-image.md)
-and [object classification tutorial](./create-cell-crops-with-cellprofiler.md) for more information as we build out this page.
diff --git a/piximi-documentation/img/user-guide-color-to-gray-view.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-color-to-gray-view.png
similarity index 100%
rename from piximi-documentation/img/user-guide-color-to-gray-view.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-color-to-gray-view.png
diff --git a/piximi-documentation/img/user-guide-identify-primary-object-view.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-identify-primary-object-view.png
similarity index 100%
rename from piximi-documentation/img/user-guide-identify-primary-object-view.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-identify-primary-object-view.png
diff --git a/piximi-documentation/img/user-guide-identify-secondary-object-view.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-identify-secondary-object-view.png
similarity index 100%
rename from piximi-documentation/img/user-guide-identify-secondary-object-view.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-identify-secondary-object-view.png
diff --git a/piximi-documentation/img/user-guide-images-input-view.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-images-input-view.png
similarity index 100%
rename from piximi-documentation/img/user-guide-images-input-view.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-images-input-view.png
diff --git a/piximi-documentation/img/user-guide-metadata-view-4.2.1.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-metadata-view-4.2.1.png
similarity index 100%
rename from piximi-documentation/img/user-guide-metadata-view-4.2.1.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-metadata-view-4.2.1.png
diff --git a/piximi-documentation/img/user-guide-names-and-types-view.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-names-and-types-view.png
similarity index 100%
rename from piximi-documentation/img/user-guide-names-and-types-view.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-names-and-types-view.png
diff --git a/piximi-documentation/img/user-guide-save-cropped-objects-view-4.2.1.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-save-cropped-objects-view-4.2.1.png
similarity index 100%
rename from piximi-documentation/img/user-guide-save-cropped-objects-view-4.2.1.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-save-cropped-objects-view-4.2.1.png
diff --git a/piximi-documentation/img/user-guide-save-cropped-objects-view.png b/piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-save-cropped-objects-view.png
similarity index 100%
rename from piximi-documentation/img/user-guide-save-cropped-objects-view.png
rename to piximi-documentation/img/cellprofiler-cell-crops-examples/user-guide-save-cropped-objects-view.png
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-category-dark.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-category-dark.webp
new file mode 100644
index 0000000..1c2d092
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-category-dark.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-category-light.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-category-light.webp
new file mode 100644
index 0000000..d30303f
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-category-light.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-button-dark.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-button-dark.webp
new file mode 100644
index 0000000..f8385de
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-button-dark.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-button-light.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-button-light.webp
new file mode 100644
index 0000000..fa9c7ed
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-button-light.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-exit-dark.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-exit-dark.webp
new file mode 100644
index 0000000..1f4004b
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-exit-dark.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-exit-light.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-exit-light.webp
new file mode 100644
index 0000000..4febe60
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-exit-light.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-fit-dark.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-fit-dark.webp
new file mode 100644
index 0000000..e8928dc
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-fit-dark.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-fit-light.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-fit-light.webp
new file mode 100644
index 0000000..5703059
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-fit-dialog-fit-light.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-predict-dark.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-predict-dark.webp
new file mode 100644
index 0000000..060919c
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-predict-dark.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/human-u20s-predict-light.webp b/piximi-documentation/img/eukaryotic-classification/human-u20s-predict-light.webp
new file mode 100644
index 0000000..620c421
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/human-u20s-predict-light.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/load-example-dark.webp b/piximi-documentation/img/eukaryotic-classification/load-example-dark.webp
new file mode 100644
index 0000000..288f7aa
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/load-example-dark.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/load-example-light.webp b/piximi-documentation/img/eukaryotic-classification/load-example-light.webp
new file mode 100644
index 0000000..7d5d4d4
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/load-example-light.webp differ
diff --git a/piximi-documentation/img/eukaryotic-classification/user-guide-accuracy-plot.webp b/piximi-documentation/img/eukaryotic-classification/user-guide-accuracy-plot.webp
new file mode 100644
index 0000000..f7a379e
Binary files /dev/null and b/piximi-documentation/img/eukaryotic-classification/user-guide-accuracy-plot.webp differ
diff --git a/piximi-documentation/img/evaluate_dialog.png b/piximi-documentation/img/faq/evaluate_dialog.png
similarity index 100%
rename from piximi-documentation/img/evaluate_dialog.png
rename to piximi-documentation/img/faq/evaluate_dialog.png
diff --git a/piximi-documentation/img/evaluate_option.png b/piximi-documentation/img/faq/evaluate_option.png
similarity index 100%
rename from piximi-documentation/img/evaluate_option.png
rename to piximi-documentation/img/faq/evaluate_option.png
diff --git a/piximi-documentation/img/open_classifier_1.png b/piximi-documentation/img/faq/open_classifier_1.png
similarity index 100%
rename from piximi-documentation/img/open_classifier_1.png
rename to piximi-documentation/img/faq/open_classifier_1.png
diff --git a/piximi-documentation/img/open_classifier_2.png b/piximi-documentation/img/faq/open_classifier_2.png
similarity index 100%
rename from piximi-documentation/img/open_classifier_2.png
rename to piximi-documentation/img/faq/open_classifier_2.png
diff --git a/piximi-documentation/img/open_classifier_3.png b/piximi-documentation/img/faq/open_classifier_3.png
similarity index 100%
rename from piximi-documentation/img/open_classifier_3.png
rename to piximi-documentation/img/faq/open_classifier_3.png
diff --git a/piximi-documentation/img/open_project_1.png b/piximi-documentation/img/faq/open_project_1.png
similarity index 100%
rename from piximi-documentation/img/open_project_1.png
rename to piximi-documentation/img/faq/open_project_1.png
diff --git a/piximi-documentation/img/open_project_2.png b/piximi-documentation/img/faq/open_project_2.png
similarity index 100%
rename from piximi-documentation/img/open_project_2.png
rename to piximi-documentation/img/faq/open_project_2.png
diff --git a/piximi-documentation/img/save_classifier_1.png b/piximi-documentation/img/faq/save_classifier_1.png
similarity index 100%
rename from piximi-documentation/img/save_classifier_1.png
rename to piximi-documentation/img/faq/save_classifier_1.png
diff --git a/piximi-documentation/img/save_classifier_2.png b/piximi-documentation/img/faq/save_classifier_2.png
similarity index 100%
rename from piximi-documentation/img/save_classifier_2.png
rename to piximi-documentation/img/faq/save_classifier_2.png
diff --git a/piximi-documentation/img/save_project_1.png b/piximi-documentation/img/faq/save_project_1.png
similarity index 100%
rename from piximi-documentation/img/save_project_1.png
rename to piximi-documentation/img/faq/save_project_1.png
diff --git a/piximi-documentation/img/save_project_2.png b/piximi-documentation/img/faq/save_project_2.png
similarity index 100%
rename from piximi-documentation/img/save_project_2.png
rename to piximi-documentation/img/faq/save_project_2.png
diff --git a/piximi-documentation/img/training_plots.png b/piximi-documentation/img/faq/training_plots.png
similarity index 100%
rename from piximi-documentation/img/training_plots.png
rename to piximi-documentation/img/faq/training_plots.png
diff --git a/piximi-documentation/img/image-viewer/image-viewer-dark-actionbar.webp b/piximi-documentation/img/image-viewer/image-viewer-dark-actionbar.webp
new file mode 100644
index 0000000..23c7b8f
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-dark-actionbar.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-dark-annotated.webp b/piximi-documentation/img/image-viewer/image-viewer-dark-annotated.webp
new file mode 100644
index 0000000..97ca073
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-dark-annotated.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-dark-annotationtools.webp b/piximi-documentation/img/image-viewer/image-viewer-dark-annotationtools.webp
new file mode 100644
index 0000000..04f2acf
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-dark-annotationtools.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-dark-canvas.webp b/piximi-documentation/img/image-viewer/image-viewer-dark-canvas.webp
new file mode 100644
index 0000000..ad1da20
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-dark-canvas.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-dark-imagetools.webp b/piximi-documentation/img/image-viewer/image-viewer-dark-imagetools.webp
new file mode 100644
index 0000000..3ee54d2
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-dark-imagetools.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-light-actionbar.webp b/piximi-documentation/img/image-viewer/image-viewer-light-actionbar.webp
new file mode 100644
index 0000000..c7e2799
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-light-actionbar.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-light-annotated.webp b/piximi-documentation/img/image-viewer/image-viewer-light-annotated.webp
new file mode 100644
index 0000000..88c6dc0
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-light-annotated.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-light-annotationtools.webp b/piximi-documentation/img/image-viewer/image-viewer-light-annotationtools.webp
new file mode 100644
index 0000000..6df7634
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-light-annotationtools.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-light-canvas.webp b/piximi-documentation/img/image-viewer/image-viewer-light-canvas.webp
new file mode 100644
index 0000000..ac9dd5f
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-light-canvas.webp differ
diff --git a/piximi-documentation/img/image-viewer/image-viewer-light-imagetools.webp b/piximi-documentation/img/image-viewer/image-viewer-light-imagetools.webp
new file mode 100644
index 0000000..d3701b6
Binary files /dev/null and b/piximi-documentation/img/image-viewer/image-viewer-light-imagetools.webp differ
diff --git a/piximi-documentation/img/load_pretrained_model.png b/piximi-documentation/img/load_pretrained_model.png
deleted file mode 100644
index 80b0b81..0000000
Binary files a/piximi-documentation/img/load_pretrained_model.png and /dev/null differ
diff --git a/piximi-documentation/img/measurements-viewer/measurements-viewer-actionbar-dark.webp b/piximi-documentation/img/measurements-viewer/measurements-viewer-actionbar-dark.webp
new file mode 100644
index 0000000..f4ee57f
Binary files /dev/null and b/piximi-documentation/img/measurements-viewer/measurements-viewer-actionbar-dark.webp differ
diff --git a/piximi-documentation/img/measurements-viewer/measurements-viewer-actionbar-light.webp b/piximi-documentation/img/measurements-viewer/measurements-viewer-actionbar-light.webp
new file mode 100644
index 0000000..2b6c13f
Binary files /dev/null and b/piximi-documentation/img/measurements-viewer/measurements-viewer-actionbar-light.webp differ
diff --git a/piximi-documentation/img/measurements-viewer/measurements-viewer-plot-tab-dark.webp b/piximi-documentation/img/measurements-viewer/measurements-viewer-plot-tab-dark.webp
new file mode 100644
index 0000000..f35d989
Binary files /dev/null and b/piximi-documentation/img/measurements-viewer/measurements-viewer-plot-tab-dark.webp differ
diff --git a/piximi-documentation/img/measurements-viewer/measurements-viewer-plot-tab-light.webp b/piximi-documentation/img/measurements-viewer/measurements-viewer-plot-tab-light.webp
new file mode 100644
index 0000000..e18f24d
Binary files /dev/null and b/piximi-documentation/img/measurements-viewer/measurements-viewer-plot-tab-light.webp differ
diff --git a/piximi-documentation/img/measurements-viewer/measurements-viewer-table-tab-dark.webp b/piximi-documentation/img/measurements-viewer/measurements-viewer-table-tab-dark.webp
new file mode 100644
index 0000000..e758b68
Binary files /dev/null and b/piximi-documentation/img/measurements-viewer/measurements-viewer-table-tab-dark.webp differ
diff --git a/piximi-documentation/img/measurements-viewer/measurements-viewer-table-tab-light.webp b/piximi-documentation/img/measurements-viewer/measurements-viewer-table-tab-light.webp
new file mode 100644
index 0000000..44c0ed3
Binary files /dev/null and b/piximi-documentation/img/measurements-viewer/measurements-viewer-table-tab-light.webp differ
diff --git a/piximi-documentation/img/model_predict.png b/piximi-documentation/img/model_predict.png
deleted file mode 100644
index 92e6ec4..0000000
Binary files a/piximi-documentation/img/model_predict.png and /dev/null differ
diff --git a/piximi-documentation/img/project-viewer/classifier/classifier-section-dark.webp b/piximi-documentation/img/project-viewer/classifier/classifier-section-dark.webp
new file mode 100644
index 0000000..fb161df
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/classifier-section-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/classifier-section-light.webp b/piximi-documentation/img/project-viewer/classifier/classifier-section-light.webp
new file mode 100644
index 0000000..e3b97e8
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/classifier-section-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/eval-dark.webp b/piximi-documentation/img/project-viewer/classifier/eval-dark.webp
new file mode 100644
index 0000000..b8feaea
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/eval-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/eval-light.webp b/piximi-documentation/img/project-viewer/classifier/eval-light.webp
new file mode 100644
index 0000000..2bf0696
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/eval-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/fit-classifier-dark.webp b/piximi-documentation/img/project-viewer/classifier/fit-classifier-dark.webp
new file mode 100644
index 0000000..015963a
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/fit-classifier-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/fit-classifier-light.webp b/piximi-documentation/img/project-viewer/classifier/fit-classifier-light.webp
new file mode 100644
index 0000000..76e7eb4
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/fit-classifier-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/history-plots-dark.webp b/piximi-documentation/img/project-viewer/classifier/history-plots-dark.webp
new file mode 100644
index 0000000..b3945ac
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/history-plots-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/history-plots-light.webp b/piximi-documentation/img/project-viewer/classifier/history-plots-light.webp
new file mode 100644
index 0000000..464337c
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/history-plots-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/load-local-class-dark.webp b/piximi-documentation/img/project-viewer/classifier/load-local-class-dark.webp
new file mode 100644
index 0000000..0947dd0
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/load-local-class-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/load-local-class-light.webp b/piximi-documentation/img/project-viewer/classifier/load-local-class-light.webp
new file mode 100644
index 0000000..cb61d98
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/load-local-class-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/load-remote-class-dark.webp b/piximi-documentation/img/project-viewer/classifier/load-remote-class-dark.webp
new file mode 100644
index 0000000..1a42f8f
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/load-remote-class-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/load-remote-class-light.webp b/piximi-documentation/img/project-viewer/classifier/load-remote-class-light.webp
new file mode 100644
index 0000000..071ad54
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/load-remote-class-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/predict-options-dark.webp b/piximi-documentation/img/project-viewer/classifier/predict-options-dark.webp
new file mode 100644
index 0000000..a7e4d9e
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/predict-options-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/predict-options-light.webp b/piximi-documentation/img/project-viewer/classifier/predict-options-light.webp
new file mode 100644
index 0000000..1bd041a
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/predict-options-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/summary-dark.webp b/piximi-documentation/img/project-viewer/classifier/summary-dark.webp
new file mode 100644
index 0000000..165fa0a
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/summary-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/classifier/summary-light.webp b/piximi-documentation/img/project-viewer/classifier/summary-light.webp
new file mode 100644
index 0000000..496ef7a
Binary files /dev/null and b/piximi-documentation/img/project-viewer/classifier/summary-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/load_pretrained_model.webp b/piximi-documentation/img/project-viewer/load_pretrained_model.webp
new file mode 100644
index 0000000..17b8413
Binary files /dev/null and b/piximi-documentation/img/project-viewer/load_pretrained_model.webp differ
diff --git a/piximi-documentation/img/project-viewer/model_predict.webp b/piximi-documentation/img/project-viewer/model_predict.webp
new file mode 100644
index 0000000..10b3930
Binary files /dev/null and b/piximi-documentation/img/project-viewer/model_predict.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-actiondrawer-dark.webp b/piximi-documentation/img/project-viewer/project-viewer-actiondrawer-dark.webp
new file mode 100644
index 0000000..5e540f3
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-actiondrawer-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-actiondrawer-light.webp b/piximi-documentation/img/project-viewer/project-viewer-actiondrawer-light.webp
new file mode 100644
index 0000000..a7e49c6
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-actiondrawer-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-appbar-dark.webp b/piximi-documentation/img/project-viewer/project-viewer-appbar-dark.webp
new file mode 100644
index 0000000..5a7c900
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-appbar-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-appbar-light.webp b/piximi-documentation/img/project-viewer/project-viewer-appbar-light.webp
new file mode 100644
index 0000000..a7e49c6
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-appbar-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-dark-actionbar.webp b/piximi-documentation/img/project-viewer/project-viewer-dark-actionbar.webp
new file mode 100644
index 0000000..b55515b
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-dark-actionbar.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-dark-annotated.webp b/piximi-documentation/img/project-viewer/project-viewer-dark-annotated.webp
new file mode 100644
index 0000000..e0512ff
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-dark-annotated.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-dark-appbar.webp b/piximi-documentation/img/project-viewer/project-viewer-dark-appbar.webp
new file mode 100644
index 0000000..4b83739
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-dark-appbar.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-dark-cropped.webp b/piximi-documentation/img/project-viewer/project-viewer-dark-cropped.webp
new file mode 100644
index 0000000..89e7a09
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-dark-cropped.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-dark-maingrid.webp b/piximi-documentation/img/project-viewer/project-viewer-dark-maingrid.webp
new file mode 100644
index 0000000..43c11f0
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-dark-maingrid.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-imagegrid-dark.webp b/piximi-documentation/img/project-viewer/project-viewer-imagegrid-dark.webp
new file mode 100644
index 0000000..c0fc85c
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-imagegrid-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-imagegrid-light.webp b/piximi-documentation/img/project-viewer/project-viewer-imagegrid-light.webp
new file mode 100644
index 0000000..a7e49c6
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-imagegrid-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-infobar-dark.webp b/piximi-documentation/img/project-viewer/project-viewer-infobar-dark.webp
new file mode 100644
index 0000000..bc59171
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-infobar-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-infobar-light.webp b/piximi-documentation/img/project-viewer/project-viewer-infobar-light.webp
new file mode 100644
index 0000000..a7e49c6
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-infobar-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-light-actionbar.webp b/piximi-documentation/img/project-viewer/project-viewer-light-actionbar.webp
new file mode 100644
index 0000000..3a04596
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-light-actionbar.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-light-annotated.webp b/piximi-documentation/img/project-viewer/project-viewer-light-annotated.webp
new file mode 100644
index 0000000..0002376
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-light-annotated.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-light-appbar.webp b/piximi-documentation/img/project-viewer/project-viewer-light-appbar.webp
new file mode 100644
index 0000000..1850314
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-light-appbar.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-light-cropped.webp b/piximi-documentation/img/project-viewer/project-viewer-light-cropped.webp
new file mode 100644
index 0000000..2298718
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-light-cropped.webp differ
diff --git a/piximi-documentation/img/project-viewer/project-viewer-light-maingrid.webp b/piximi-documentation/img/project-viewer/project-viewer-light-maingrid.webp
new file mode 100644
index 0000000..b52e8ea
Binary files /dev/null and b/piximi-documentation/img/project-viewer/project-viewer-light-maingrid.webp differ
diff --git a/piximi-documentation/img/project-viewer/segmentation_output.webp b/piximi-documentation/img/project-viewer/segmentation_output.webp
new file mode 100644
index 0000000..8ee31c6
Binary files /dev/null and b/piximi-documentation/img/project-viewer/segmentation_output.webp differ
diff --git a/piximi-documentation/img/project-viewer/segmenter/load-seg-dark.webp b/piximi-documentation/img/project-viewer/segmenter/load-seg-dark.webp
new file mode 100644
index 0000000..10ff77b
Binary files /dev/null and b/piximi-documentation/img/project-viewer/segmenter/load-seg-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/segmenter/load-seg-light.webp b/piximi-documentation/img/project-viewer/segmenter/load-seg-light.webp
new file mode 100644
index 0000000..22e3612
Binary files /dev/null and b/piximi-documentation/img/project-viewer/segmenter/load-seg-light.webp differ
diff --git a/piximi-documentation/img/project-viewer/segmenter/segmentation-section-dark.webp b/piximi-documentation/img/project-viewer/segmenter/segmentation-section-dark.webp
new file mode 100644
index 0000000..5a39bb2
Binary files /dev/null and b/piximi-documentation/img/project-viewer/segmenter/segmentation-section-dark.webp differ
diff --git a/piximi-documentation/img/project-viewer/segmenter/segmentation-section-light.webp b/piximi-documentation/img/project-viewer/segmenter/segmentation-section-light.webp
new file mode 100644
index 0000000..92c7c01
Binary files /dev/null and b/piximi-documentation/img/project-viewer/segmenter/segmentation-section-light.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-cellpose-results-dark.webp b/piximi-documentation/img/segmentation-tutorial/u20s-cellpose-results-dark.webp
new file mode 100644
index 0000000..d8929e1
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-cellpose-results-dark.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-cellpose-results-light.webp b/piximi-documentation/img/segmentation-tutorial/u20s-cellpose-results-light.webp
new file mode 100644
index 0000000..d1a1979
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-cellpose-results-light.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-dark.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-dark.webp
new file mode 100644
index 0000000..c008647
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-dark.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-light.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-light.webp
new file mode 100644
index 0000000..7fa8271
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-light.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-load-dark.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-load-dark.webp
new file mode 100644
index 0000000..403fdf8
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-load-dark.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-load-light.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-load-light.webp
new file mode 100644
index 0000000..32880c8
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-load-light.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-models-dark.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-models-dark.webp
new file mode 100644
index 0000000..110596a
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-models-dark.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-models-light.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-models-light.webp
new file mode 100644
index 0000000..d5dd0ee
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-models-light.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-predict-dark.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-predict-dark.webp
new file mode 100644
index 0000000..5adf7a8
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-predict-dark.webp differ
diff --git a/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-predict-light.webp b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-predict-light.webp
new file mode 100644
index 0000000..57bf7ec
Binary files /dev/null and b/piximi-documentation/img/segmentation-tutorial/u20s-segmentation-predict-light.webp differ
diff --git a/piximi-documentation/img/segmentation_output.png b/piximi-documentation/img/segmentation_output.png
deleted file mode 100644
index e5f468a..0000000
Binary files a/piximi-documentation/img/segmentation_output.png and /dev/null differ
diff --git a/piximi-documentation/img/translocation-tutorial/accept-predictions-dark.webp b/piximi-documentation/img/translocation-tutorial/accept-predictions-dark.webp
new file mode 100644
index 0000000..7ace981
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/accept-predictions-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/accept-predictions-light.webp b/piximi-documentation/img/translocation-tutorial/accept-predictions-light.webp
new file mode 100644
index 0000000..5fa766b
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/accept-predictions-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/categorize-dark.webp b/piximi-documentation/img/translocation-tutorial/categorize-dark.webp
new file mode 100644
index 0000000..1e9bcaa
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/categorize-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/categorize-light.webp b/piximi-documentation/img/translocation-tutorial/categorize-light.webp
new file mode 100644
index 0000000..e6ef93b
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/categorize-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/cellpose-select-dark.webp b/piximi-documentation/img/translocation-tutorial/cellpose-select-dark.webp
new file mode 100644
index 0000000..99ac606
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/cellpose-select-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/cellpose-select-light.webp b/piximi-documentation/img/translocation-tutorial/cellpose-select-light.webp
new file mode 100644
index 0000000..1fe7c0d
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/cellpose-select-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/cellpose_cells-dark.webp b/piximi-documentation/img/translocation-tutorial/cellpose_cells-dark.webp
new file mode 100644
index 0000000..1d3e81d
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/cellpose_cells-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/cellpose_cells-light.webp b/piximi-documentation/img/translocation-tutorial/cellpose_cells-light.webp
new file mode 100644
index 0000000..f37b830
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/cellpose_cells-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/classifier-create-category-dark.webp b/piximi-documentation/img/translocation-tutorial/classifier-create-category-dark.webp
new file mode 100644
index 0000000..c61f2f4
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/classifier-create-category-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/classifier-create-category-light.webp b/piximi-documentation/img/translocation-tutorial/classifier-create-category-light.webp
new file mode 100644
index 0000000..3680778
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/classifier-create-category-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/classifier-section-dark.webp b/piximi-documentation/img/translocation-tutorial/classifier-section-dark.webp
new file mode 100644
index 0000000..121cbc1
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/classifier-section-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/classifier-section-light.webp b/piximi-documentation/img/translocation-tutorial/classifier-section-light.webp
new file mode 100644
index 0000000..45df29a
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/classifier-section-light.webp differ
diff --git a/piximi-documentation/img/tutorial_images/Figure1.png b/piximi-documentation/img/translocation-tutorial/f0x01a.png
similarity index 100%
rename from piximi-documentation/img/tutorial_images/Figure1.png
rename to piximi-documentation/img/translocation-tutorial/f0x01a.png
diff --git a/piximi-documentation/img/translocation-tutorial/image-viewer-dark.webp b/piximi-documentation/img/translocation-tutorial/image-viewer-dark.webp
new file mode 100644
index 0000000..0d9fb53
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/image-viewer-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/image-viewer-light.webp b/piximi-documentation/img/translocation-tutorial/image-viewer-light.webp
new file mode 100644
index 0000000..94df6b8
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/image-viewer-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/load-model-dark.webp b/piximi-documentation/img/translocation-tutorial/load-model-dark.webp
new file mode 100644
index 0000000..3f61015
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/load-model-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/load-model-light.webp b/piximi-documentation/img/translocation-tutorial/load-model-light.webp
new file mode 100644
index 0000000..1fc5797
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/load-model-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-data-grid-dark.webp b/piximi-documentation/img/translocation-tutorial/measurements-data-grid-dark.webp
new file mode 100644
index 0000000..d8fc877
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-data-grid-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-data-grid-light.webp b/piximi-documentation/img/translocation-tutorial/measurements-data-grid-light.webp
new file mode 100644
index 0000000..95ccad7
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-data-grid-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-plot-switch-dark.webp b/piximi-documentation/img/translocation-tutorial/measurements-plot-switch-dark.webp
new file mode 100644
index 0000000..d7603cb
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-plot-switch-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-plot-switch-light.webp b/piximi-documentation/img/translocation-tutorial/measurements-plot-switch-light.webp
new file mode 100644
index 0000000..e36b7a8
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-plot-switch-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-swarm-plot-dark.webp b/piximi-documentation/img/translocation-tutorial/measurements-swarm-plot-dark.webp
new file mode 100644
index 0000000..beec7e6
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-swarm-plot-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-swarm-plot-light.webp b/piximi-documentation/img/translocation-tutorial/measurements-swarm-plot-light.webp
new file mode 100644
index 0000000..72e9174
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-swarm-plot-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-table-create-dark.webp b/piximi-documentation/img/translocation-tutorial/measurements-table-create-dark.webp
new file mode 100644
index 0000000..80bd409
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-table-create-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/measurements-table-create-light.webp b/piximi-documentation/img/translocation-tutorial/measurements-table-create-light.webp
new file mode 100644
index 0000000..c79442f
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/measurements-table-create-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/nav-measurements-dark.webp b/piximi-documentation/img/translocation-tutorial/nav-measurements-dark.webp
new file mode 100644
index 0000000..14be3cf
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/nav-measurements-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/nav-measurements-light.webp b/piximi-documentation/img/translocation-tutorial/nav-measurements-light.webp
new file mode 100644
index 0000000..86324ac
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/nav-measurements-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/open-model-dark.webp b/piximi-documentation/img/translocation-tutorial/open-model-dark.webp
new file mode 100644
index 0000000..eb34b4b
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/open-model-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/open-model-light.webp b/piximi-documentation/img/translocation-tutorial/open-model-light.webp
new file mode 100644
index 0000000..bdd1c6d
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/open-model-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/open-zip-dark.webp b/piximi-documentation/img/translocation-tutorial/open-zip-dark.webp
new file mode 100644
index 0000000..ea6f7ca
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/open-zip-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/open-zip-light.webp b/piximi-documentation/img/translocation-tutorial/open-zip-light.webp
new file mode 100644
index 0000000..f0d32ed
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/open-zip-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/predict-classifier-dark.webp b/piximi-documentation/img/translocation-tutorial/predict-classifier-dark.webp
new file mode 100644
index 0000000..e08399e
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/predict-classifier-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/predict-classifier-light.webp b/piximi-documentation/img/translocation-tutorial/predict-classifier-light.webp
new file mode 100644
index 0000000..ad1546a
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/predict-classifier-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/predict-dark.webp b/piximi-documentation/img/translocation-tutorial/predict-dark.webp
new file mode 100644
index 0000000..b9abef7
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/predict-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/predict-light.webp b/piximi-documentation/img/translocation-tutorial/predict-light.webp
new file mode 100644
index 0000000..39d4ae3
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/predict-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/segmenter-section-dark.webp b/piximi-documentation/img/translocation-tutorial/segmenter-section-dark.webp
new file mode 100644
index 0000000..c448616
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/segmenter-section-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/segmenter-section-light.webp b/piximi-documentation/img/translocation-tutorial/segmenter-section-light.webp
new file mode 100644
index 0000000..30ea0cf
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/segmenter-section-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/select-all-dark.webp b/piximi-documentation/img/translocation-tutorial/select-all-dark.webp
new file mode 100644
index 0000000..115e5e2
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/select-all-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/select-all-light.webp b/piximi-documentation/img/translocation-tutorial/select-all-light.webp
new file mode 100644
index 0000000..9226356
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/select-all-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/training-eval-dark.webp b/piximi-documentation/img/translocation-tutorial/training-eval-dark.webp
new file mode 100644
index 0000000..5b0acd1
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/training-eval-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/training-eval-light.webp b/piximi-documentation/img/translocation-tutorial/training-eval-light.webp
new file mode 100644
index 0000000..f9a150d
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/training-eval-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/training-plots-dark.webp b/piximi-documentation/img/translocation-tutorial/training-plots-dark.webp
new file mode 100644
index 0000000..25b5236
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/training-plots-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/training-plots-light.webp b/piximi-documentation/img/translocation-tutorial/training-plots-light.webp
new file mode 100644
index 0000000..84dd1af
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/training-plots-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/training-settings-dark.webp b/piximi-documentation/img/translocation-tutorial/training-settings-dark.webp
new file mode 100644
index 0000000..81e01c8
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/training-settings-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/training-settings-light.webp b/piximi-documentation/img/translocation-tutorial/training-settings-light.webp
new file mode 100644
index 0000000..d542d3c
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/training-settings-light.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/trans-project-images-dark.webp b/piximi-documentation/img/translocation-tutorial/trans-project-images-dark.webp
new file mode 100644
index 0000000..f798f86
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/trans-project-images-dark.webp differ
diff --git a/piximi-documentation/img/translocation-tutorial/trans-project-images-light.webp b/piximi-documentation/img/translocation-tutorial/trans-project-images-light.webp
new file mode 100644
index 0000000..f50e3ab
Binary files /dev/null and b/piximi-documentation/img/translocation-tutorial/trans-project-images-light.webp differ
diff --git a/piximi-documentation/img/tutorial_images/Category.png b/piximi-documentation/img/tutorial_images/Category.png
deleted file mode 100644
index e8dc17c..0000000
Binary files a/piximi-documentation/img/tutorial_images/Category.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure10.png b/piximi-documentation/img/tutorial_images/Figure10.png
deleted file mode 100644
index ddcf92f..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure10.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure2.png b/piximi-documentation/img/tutorial_images/Figure2.png
deleted file mode 100644
index f6913b8..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure2.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure3.png b/piximi-documentation/img/tutorial_images/Figure3.png
deleted file mode 100644
index 758f2b2..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure3.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure4.png b/piximi-documentation/img/tutorial_images/Figure4.png
deleted file mode 100644
index 2d39250..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure4.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure5.png b/piximi-documentation/img/tutorial_images/Figure5.png
deleted file mode 100644
index ce78fa7..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure5.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure6.png b/piximi-documentation/img/tutorial_images/Figure6.png
deleted file mode 100644
index 981787e..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure6.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure7.png b/piximi-documentation/img/tutorial_images/Figure7.png
deleted file mode 100644
index e14b645..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure7.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure8.png b/piximi-documentation/img/tutorial_images/Figure8.png
deleted file mode 100644
index fd9ade4..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure8.png and /dev/null differ
diff --git a/piximi-documentation/img/tutorial_images/Figure9.png b/piximi-documentation/img/tutorial_images/Figure9.png
deleted file mode 100644
index 4ef7a89..0000000
Binary files a/piximi-documentation/img/tutorial_images/Figure9.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-accuracy-plot.png b/piximi-documentation/img/user-guide-accuracy-plot.png
deleted file mode 100644
index 8414dd6..0000000
Binary files a/piximi-documentation/img/user-guide-accuracy-plot.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-exit-fit.png b/piximi-documentation/img/user-guide-exit-fit.png
deleted file mode 100644
index 0cf14ee..0000000
Binary files a/piximi-documentation/img/user-guide-exit-fit.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-open-img.png b/piximi-documentation/img/user-guide-open-img.png
deleted file mode 100644
index 1dba649..0000000
Binary files a/piximi-documentation/img/user-guide-open-img.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-run-fit.png b/piximi-documentation/img/user-guide-run-fit.png
deleted file mode 100644
index c665058..0000000
Binary files a/piximi-documentation/img/user-guide-run-fit.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-u2os-fit-settings.png b/piximi-documentation/img/user-guide-u2os-fit-settings.png
deleted file mode 100644
index b649f17..0000000
Binary files a/piximi-documentation/img/user-guide-u2os-fit-settings.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-u2os-label-highlight.png b/piximi-documentation/img/user-guide-u2os-label-highlight.png
deleted file mode 100644
index 6462fcb..0000000
Binary files a/piximi-documentation/img/user-guide-u2os-label-highlight.png and /dev/null differ
diff --git a/piximi-documentation/img/user-guide-u2os-run-predict.png b/piximi-documentation/img/user-guide-u2os-run-predict.png
deleted file mode 100644
index fba89e9..0000000
Binary files a/piximi-documentation/img/user-guide-u2os-run-predict.png and /dev/null differ
diff --git a/piximi-documentation/img/segmentation_model.png b/piximi-documentation/img/vestigial/segmentation_model.png
similarity index 100%
rename from piximi-documentation/img/segmentation_model.png
rename to piximi-documentation/img/vestigial/segmentation_model.png
diff --git a/piximi-documentation/img/user-guide-left-drawer-segmentation.png b/piximi-documentation/img/vestigial/user-guide-left-drawer-segmentation.png
similarity index 100%
rename from piximi-documentation/img/user-guide-left-drawer-segmentation.png
rename to piximi-documentation/img/vestigial/user-guide-left-drawer-segmentation.png
diff --git a/piximi-documentation/img/user-guide-new-project.png b/piximi-documentation/img/vestigial/user-guide-new-project.png
similarity index 100%
rename from piximi-documentation/img/user-guide-new-project.png
rename to piximi-documentation/img/vestigial/user-guide-new-project.png
diff --git a/piximi-documentation/img/user-guide-open-annotation.png b/piximi-documentation/img/vestigial/user-guide-open-annotation.png
similarity index 100%
rename from piximi-documentation/img/user-guide-open-annotation.png
rename to piximi-documentation/img/vestigial/user-guide-open-annotation.png
diff --git a/piximi-documentation/img/user-guide-open-image.png b/piximi-documentation/img/vestigial/user-guide-open-image.png
similarity index 100%
rename from piximi-documentation/img/user-guide-open-image.png
rename to piximi-documentation/img/vestigial/user-guide-open-image.png
diff --git a/piximi-documentation/img/user-guide-open-project.png b/piximi-documentation/img/vestigial/user-guide-open-project.png
similarity index 100%
rename from piximi-documentation/img/user-guide-open-project.png
rename to piximi-documentation/img/vestigial/user-guide-open-project.png
diff --git a/piximi-documentation/img/welcome-screen.png b/piximi-documentation/img/welcome-screen.png
new file mode 100644
index 0000000..bd20e56
Binary files /dev/null and b/piximi-documentation/img/welcome-screen.png differ
diff --git a/piximi-documentation/intro.md b/piximi-documentation/intro.md
index 20fcb4e..81577d1 100644
--- a/piximi-documentation/intro.md
+++ b/piximi-documentation/intro.md
@@ -1,39 +1,68 @@
+# Piximi: Images to Discovery
-# Images to Discovery
+Piximi is an open-source, browser-based platform for interactive bioimage analysis. Designed for accessibility and flexibility, Piximi enables users to perform advanced image processing tasks without requiring any installation or deep machine learning expertise. Built with researchers and biologists in mind, it provides a user-friendly interface for annotation, segmentation, classification, and quantitative measurement of microscopy images.
-Piximi is an application that runs entirely from your browser and requires no installation and minimal setup. Our vision for Piximi is to provide an intuitive application for the annotation, segmentation, classification and measurement of images. In this current release, Piximi allows users to annotate specific regions of your images, such as nuclei, and can also use deep learning to classify your images into distinct groups.
+---
-
+## Key Features
+**Browser-Based**
-
Launch Piximi here
+No installation, no dependencies, no compatibility issues. Run Piximi directly in your browser, on any platform.
-[Watch a Piximi tutorial on YouTube](https://youtu.be/-wjUxc4ZHCc?si=sB-Z2EnBtjd-PP_j)
+**Privacy-Preserving**
-```{admonition} Known issues
-:class: warning
+All data stays on your machine. Piximi processes images entirely client-side—ideal for working with sensitive or patient-derived datasets.
-Piximi is an active work in progress. [Click here to see the known issues we're working on](work-in-progress).
-```
+**Integrated Image Analysis Pipeline**
+
+Piximi supports the full Images-to-Discovery workflow:
+
+- Annotation: Draw and label regions of interest manually.
+- Segmentation: Automatically identify and isolate objects within images using AI models.
+- Classification: Assign labels to segmented objects using customizable classifiers.
+- Measurement: Quantify object features and export results for downstream analysis.
+
+**No-Code AI**
+
+Apply and train machine learning models visually. Piximi abstracts complex AI concepts through guided tools, allowing non-specialists to harness their power effectively.
+**Open Source & Extensible**
-## The Piximi Annotator
+Hosted on GitHub. Actively developed with a modular architecture supporting collaboration, feature requests, and custom extensions.
-
+---
-The annotator within Piximi allows you to intuitively create annotations on your image of choice using a variety of tools. These tools include manual pen annotations in addition to more automatic methods with quick annotation. The Piximi annotator also works with **multichannel** and **multiplane** images, both of which can be easily selected to make sure that annotations are placed where they need to be. In future releases, we aim to also include z-plane interpolation to make annotating in 3D even easier.
+## Who Is It For?
-## The Piximi Classifier
+Piximi is built for:
-By using deep learning, Piximi can classify images of a variety of subject matter, such as bacteria, cultured cells, insects and more. The power of deep learning is applied upon a small number of images that have been categorized by the user in Piximi which then gives the computer a starting point on understanding what a particular category of image looks like. For example, the user can teach the computer what cells in G1, S, G2 or M-phases of the cell cycle look like. These user-made categorizations are then intensely examined by the deep learning model within Piximi through a process known as training. During this training process, the deep learning model finds patterns in the data in order to link the input (an image added by the user) to an output (the category, or class, defined by the user). The trained deep learning model can then predict on uncategorized images and determine which class they belong to. In the aforementioned example, this would determine which stage of the cell cycle a particular cell is in without relying on user input.
+- Biologists and life science researchers with no programming background
+- Educators teaching image analysis and machine learning concepts
+- Developers looking to integrate client-side AI tools into scientific workflows
-Ultimately, Piximi offers users a way to have a highly customizable method of classifying large image sets across a range of subject matter by learning from annotations made by the user.
+---
-## Piximi Next Steps
+## Documentation Overview
-Piximi is a work in progress and currently features an image annotator and deep learning-based classification of images. The ultimate aim of Piximi is to provide users with an intuitive application for the annotation, segmentation, classification and measurement of information present in images. In this phase of Piximi, we have released the annotator and classifier as these are important first steps in preparing to add the segmentation and measurement functionalities.
+This documentation provides:
+
+- Step-by-step guides for common workflows
+- Explanations of available tools and settings
+- Descriptions of underlying models and their usage
+- Glossary of key technical terms
+- Contribution and support information
+
+```{admonition} Active Development
+:class: warning
+
+Piximi is in active development. Features and UI/UX may be added, removed, or updated. [Click here to see known issues and planned features](pages/technical/work-in-progress).
+```
-We are developing Piximi in the open. If you have any features that you think that you would like to see in Piximi, please create a discussion in [our Piximi Github repo](https://github.com/piximi/piximi/discussions).
+
## Color annotation
-```{figure} ./img/annotation-demo/PiximiColor.gif
+
+```{figure} ../../img/annotation-demo/PiximiColor.gif
:class: annotation-gif
---
name: color-annotation-demo
@@ -59,7 +55,7 @@ The color annotation tool
## Quick annotation
-```{figure} ./img/annotation-demo/PiximiSuperpixel.gif
+```{figure} ../../img/annotation-demo/PiximiSuperpixel.gif
:class: annotation-gif
---
name: quick-annotation-demo
@@ -68,7 +64,8 @@ The quick annotation (aka superpixel) tool
```
## Rectangular annotation
-```{figure} ./img/annotation-demo/PiximiRectangle.gif
+
+```{figure} ../../img/annotation-demo/PiximiRectangle.gif
:class: annotation-gif
---
name: rectangular-annotation-demo
@@ -77,15 +74,18 @@ The rectangular annotation tool
```
## Elliptical annotation
-```{figure} ./img/annotation-demo/PiximiEllipse.gif
+
+```{figure} ../../img/annotation-demo/PiximiEllipse.gif
:class: annotation-gif
---
name: elliptical-annotation-demo
---
The elliptical annotation tool
```
+
## Polygonal annotation
-```{figure} ./img/annotation-demo/PiximiPolygon.gif
+
+```{figure} ../../img/annotation-demo/PiximiPolygon.gif
:class: annotation-gif
---
name: polygon-annotation-demo
@@ -94,7 +94,8 @@ The polygon annotation tool
```
## Pen annotation
-```{figure} ./img/annotation-demo/PiximiPen.gif
+
+```{figure} ../../img/annotation-demo/PiximiPen.gif
:class: annotation-gif
---
name: pen-annotation-demo
@@ -103,7 +104,8 @@ The pen annotation tool
```
## Lasso annotation
-```{figure} ./img/annotation-demo/PiximiLasso.gif
+
+```{figure} ../../img/annotation-demo/PiximiLasso.gif
:class: annotation-gif
---
name: lasso-annotation-demo
@@ -111,7 +113,4 @@ name: lasso-annotation-demo
The lasso annotation tool
```
-
-
-
diff --git a/piximi-documentation/pages/detail/imageviewer.md b/piximi-documentation/pages/detail/imageviewer.md
new file mode 100644
index 0000000..bddd370
--- /dev/null
+++ b/piximi-documentation/pages/detail/imageviewer.md
@@ -0,0 +1,131 @@
+# Image Viewer
+
+The annotator in Piximi can quickly create annotations for your **multichannel** and **multiplane** images. Below is an overview of the main components of the Image Viewer.
+
+
+
+
+
+
+
+1. Action Drawer
+2. Canvas
+3. Image Tools
+4. Annotation Tools
+
+## Action Drawer
+
+
+
+
+
+
+
+**1. Export Annotations**
+
+Export the annotation masks for each of the images in the Image Viewer.
+
+Masks can be exported in a variety of formats:
+
+- Piximi-Formatted JSON (Annotations exported in this format can be imported back into Piximi.)
+- COCO-Formatted JSON
+- Instance Masks (Labeled or Binary)
+- Semantic Masks (Labeled or Binary)
+- Label Matrices
+
+The mask images will be exported in the `.tiff` file format.
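+
+As a quick sanity check after export, you can inspect a labeled instance mask in Python. This is a minimal sketch, assuming the `numpy` and `tifffile` packages and a hypothetical exported filename:
+
+```python
+# Sketch: inspect a labeled instance mask exported from Piximi.
+# "nuclei_masks.tiff" is a hypothetical filename; substitute your own export.
+import numpy as np
+import tifffile
+
+mask = tifffile.imread("nuclei_masks.tiff")   # integer-labeled 2D array
+n_objects = np.unique(mask[mask != 0]).size   # label 0 is background
+print(f"{n_objects} objects in a mask of shape {mask.shape}")
+```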
+
+**2. Image List**
+
+The images viewable in the canvas are listed here. You can export or clear annotations for a particular image by selecting the associated menu icon.
+
+**3. Create a New Kind**
+
+Click the **+** button to create a new Kind in the project. Existing Kinds are listed here and expanding them reveals the associated categories. Click on the menu button to edit or delete the associated Kind, or clear the associated annotation. You can hide the associated annotations by clicking the "eye" button.
+
+**4. Category List**
+
+Contains the per-Kind categories in the project. Here you can create new categories, hide annotations of a specific category by clicking the icon to the left of its name, and clear associated annotations from the category's menu.
+
+**5. Clear Annotations**
+
+Clear all or selected annotations.
+
+## Canvas
+
+
+
+
+
+
+
+**1. Image**
+
+The image selected in the image list can be viewed here. You can scroll to zoom, and click and drag while holding down `alt`/`option` to pan.
+
+**2. Annotations**
+
+Created annotations are overlaid on the image with a color corresponding to their category.
+
+**3. Cursor Coords**
+
+Displays the coordinates of the cursor relative to the image.
+
+**4. Pixel Color**
+
+Displays the color of the pixel at the current cursor position.
+
+## Image Tools
+
+
+
+
+
+
+
+**1. Zoom Tools**
+
+- _Toggle Zoom Center_: Configure scroll zooming to zoom to the center of the image or to the cursor position
+- _Zoom to Region_: Zoom to a selected region of the image.
+- _Actual Size_: Reset the image to its original size.
+- _Zoom-to-Fit_: Adjust the image so that it fits the canvas.
+- _Center_: Reset the position of the image.
+
+**2. Channel and Plane Adjustment**
+
+- _Channel Adjustment_: Toggle channels on or off, adjust the min and max values, and change the color mapping. Clicking "Apply All" will apply the changes to all images in the Image Viewer.
+- _Plane Adjustment_: When viewing a 3D image, use this to change the visible plane in the canvas.
+
+**3. Annotation Selection**
+
+Click on the selection tool to select annotations. Holding `shift` while selecting, or clicking then dragging, will select multiple annotations.
+
+## Annotation Tools
+
+
+
+
+
+
+
+**1. Augmentation Type**
+
+- _New_: This augmentation creates a new annotation.
+- _Combine_: With this augmentation selected, any annotation being drawn will be merged with another currently selected annotation.
+- _Subtract_: With this augmentation selected, any annotation being drawn will be subtracted from another currently selected annotation.
+- _Intersection_: With this augmentation selected, a currently selected annotation will be modified to be the intersection of itself and a newly drawn annotation.
+- _Invert_: Inverts the currently selected annotation. (See the mask-operation sketch after this list.)
+
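+The non-_New_ modes behave like boolean operations on binary masks. A minimal numpy sketch of the idea (illustrative only, not Piximi's internal implementation):
+
+```python
+# Sketch: annotation-combination modes as boolean mask operations.
+import numpy as np
+
+selected = np.zeros((8, 8), dtype=bool)
+selected[2:6, 2:6] = True   # currently selected annotation
+drawn = np.zeros((8, 8), dtype=bool)
+drawn[4:8, 4:8] = True      # newly drawn annotation
+
+combined = selected | drawn     # Combine: union of the two masks
+subtracted = selected & ~drawn  # Subtract: remove the drawn region
+intersected = selected & drawn  # Intersection: keep only the overlap
+inverted = ~selected            # Invert: flip inside and outside
+```
+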
+**2. Creation Tools**
+
+- _Rectangular Tool_: Click and drag, or click twice to create a rectangular annotation.
+- _Elliptical Tool_: Click and drag, or click twice to create an elliptical annotation.
+- _Polygonal Tool_: Click at each point where you want a vertex of the polygon, then click on or near the initial vertex to complete the annotation.
+- _Pen Tool_: Free-draw an annotation. Open the slider to select the pen size.
+- _Lasso Tool_: Click and drag to create a boundary for the annotation.
+- _Magnetic Tool_: The magnetic tool tries to find edges of objects to help speed up the annotating process.
+- _Fill Tool_: Click and drag from the center of an object to create an annotation over it.
+- _Quick Annotation Tool_: Attempts to predict annotations near the cursor. Use the slider to adjust the sensitivity of this tool.
+- _Threshold Tool_: Select a region in which to generate annotations. Use the slider to adjust the sensitivity. Note: the generated mask will be treated as a single annotation.
+
+To see the tools in action, go to the [](imageviewer-tools-annotation.md) section.
diff --git a/piximi-documentation/pages/detail/measurements-viewer.md b/piximi-documentation/pages/detail/measurements-viewer.md
new file mode 100644
index 0000000..16c132c
--- /dev/null
+++ b/piximi-documentation/pages/detail/measurements-viewer.md
@@ -0,0 +1,110 @@
+# Measurements Viewer
+
+The Measurements Viewer lets you compute measurements over the images and objects in your project and explore the results in tables and plots. Below is an overview of its main components.
+
+## Table Creation and Measurement Selection
+
+
+
+
+
+
+
+**1. Table Creation**
+
+Click the "+" button to create a new measurement group based on one of the Kinds in the project.
+
+**2. Split Selection**
+
+Select the splits you want to use for the measurements. The splits dictate how the measurements are grouped for statistical analysis (e.g. calculating the mean intensity over all the images in the **training** partition).
+
+**3. Measurement Selection**
+
+Measurements are separated into two sections: **Intensity** and **Object Geometry**.
+
+_Intensity Measurements_
+
+These measurements are performed for whole images, as well as for image crops of annotated objects, and include:
+
+- _Total Intensity_: The cumulative sum of the intensity values of each pixel.
+- _Mean Intensity_: The mean of the pixel intensities.
+- _Standard Deviation_: The standard deviation of the pixel intensities.
+- _MAD_: The median absolute deviation of the pixel intensities of the image. (See the sketch after this list.)
+- _Minimum Intensity_: The minimum intensity of the pixels in the image.
+- _Maximum Intensity_: The maximum intensity of the pixels in the image.
+- _Lower Quartile_: The intensity value below which 25% of the pixel intensities fall.
+- _Upper Quartile_: The intensity value below which 75% of the pixel intensities fall.
+
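+Most of these are standard summary statistics; here is a minimal numpy sketch of the less common ones (illustrative definitions, not necessarily Piximi's exact implementation):
+
+```python
+# Sketch: MAD and quartiles over an image's pixel intensities.
+import numpy as np
+
+pixels = np.random.default_rng(0).integers(0, 256, size=(64, 64)).ravel()
+
+mad = np.median(np.abs(pixels - np.median(pixels)))  # median absolute deviation
+lower_quartile = np.percentile(pixels, 25)  # 25% of intensities fall below this
+upper_quartile = np.percentile(pixels, 75)  # 75% of intensities fall below this
+```
+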
+_Object Geometry Measurements_
+
+These measurements are performed on the object masks and are not available for the **Image** Kind.
+
+- _Area_: The area of the annotation mask.
+- _Bounding-Box Area_: The area of the annotation's bounding box.
+- _Perimeter_: The perimeter of the annotation mask.
+- _Extent_: The ratio of mask area to bounding-box area.
+- _Equivalent Diameter_: The diameter of a perfect circle whose area is equivalent to the area of the annotation.
+- _Diameter of Equal Perimeter (PED)_: The diameter of a circle whose perimeter is equal to that of the annotation.
+- _Sphericity_: The extent to which an annotation is perfectly spherical. Ranges from 0 (irregularly shaped) to 1 (spherical).
+- _Compactness_: The degree to which objects are compact. Circles are the most compact, with a value of 1; the value increases with increasing shape irregularity. (See the sketch after this list.)
+
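+A minimal sketch of how these descriptors relate to a mask's area and perimeter, assuming the standard 2D formulas implied by the descriptions above (not necessarily Piximi's exact implementation):
+
+```python
+# Sketch: shape descriptors from a mask's area A and perimeter P.
+import math
+
+def equivalent_diameter(area: float) -> float:
+    # Diameter of a circle with the same area: A = pi * (d / 2)**2
+    return 2.0 * math.sqrt(area / math.pi)
+
+def perimeter_equivalent_diameter(perimeter: float) -> float:
+    # Diameter of a circle with the same perimeter: P = pi * d
+    return perimeter / math.pi
+
+def compactness(area: float, perimeter: float) -> float:
+    # 1 for a circle; increases with shape irregularity
+    return perimeter**2 / (4.0 * math.pi * area)
+
+def sphericity(area: float, perimeter: float) -> float:
+    # 1 for a circle; approaches 0 for irregular shapes
+    return (4.0 * math.pi * area) / perimeter**2
+```
+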
+## Measurements Data Grid
+
+
+
+
+
+
+
+**1. Display Tabs**
+
+Switch views between the data grid and the measurement plots.
+
+**2. Data Grid**
+
+- _Measurement Name_: Name of the measurement.
+- _Split_: The split over which the measurement results are analyzed.
+- _Mean_: The mean over all the measurement values in the split.
+- _Median_: The median over all the measurement values in the split.
+- _Standard Deviation_: The standard deviation over all the measurement values in the split.
+
+## Measurements Plots
+
+
+
+
+
+
+
+**1. Plot Controls**
+
+- _Plot Type_: The type of plot to use: Histogram, Scatter, or Beeswarm.
+- _Color Scheme_: The color scheme of the plot.
+- _x-Axis_: The measurement used for the x-axis.
+- _y-Axis_: The measurement used for the y-axis.
+- _Size_: The measurement used for the size mapping.
+- _Color_: The split type used for color mapping (by-category or by-partition).
+- _Number of Bins_: The number of bins to use in the histogram plot.
+- _Swarm Group_: The split type used for swarm grouping (by-category or by-partition).
+
+| Plot Name | Color Scheme | x-Axis | y-Axis | Size | Color | Num. Bins | Swarm Group |
+| --------- | ------------ | ------------ | ------------ | ------------ | ------------ | ------------ | ------------ |
+| Histogram | Configurable | Configurable | N/A | N/A | N/A | Configurable | N/A |
+| Scatter | Configurable | Configurable | Configurable | Configurable | Configurable | N/A | N/A |
+| Beeswarm | Configurable | N/A | Configurable | N/A | N/A | N/A | Configurable |
+
+**2. Plot Container**
+
+Displays the current plot.
+
+**3. Plot Tabs**
+
+Switch between multiple active plots. Click on the tab title to update the name of the plot, or click on the "X" button to remove the plot.
+
+**4. Create New Plot**
+
+Create a new plot.
+
+**5. Save Plot**
+
+Save the currently viewed plot as a `.png` file.
diff --git a/piximi-documentation/pages/detail/projectviewer-classification.md b/piximi-documentation/pages/detail/projectviewer-classification.md
new file mode 100644
index 0000000..a9e8726
--- /dev/null
+++ b/piximi-documentation/pages/detail/projectviewer-classification.md
@@ -0,0 +1,210 @@
+# Classification
+
+Using Piximi, you can classify your images with either a pretrained classification model you upload, or with a model you train from scratch using Piximi. Our goal is to make classification as easy as possible while exposing enough settings so that you can get precise and repeatable results.
+
+```{admonition} Uploaded Models
+:class: warning
+
+Piximi uses TensorFlowJS under the hood for all DL tasks. Any model you upload must be a TFJS Saved Model.
+```
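+
+If your model was trained in Python with Keras, one common way to produce TFJS-compatible files is the `tensorflowjs` converter. A minimal sketch, assuming you have the `tensorflowjs` package installed and a trained Keras model at a hypothetical path:
+
+```python
+# Sketch: convert a trained Keras model to TensorFlow.js format.
+# The output directory will contain a model.json file plus one or
+# more binary weight shards, which is what Piximi expects on upload.
+import tensorflowjs as tfjs
+from tensorflow import keras
+
+model = keras.models.load_model("my_classifier.h5")  # hypothetical path
+tfjs.converters.save_keras_model(model, "tfjs_model/")
+```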
+
+## Overview
+
+
+
+
+
+
+
+1. Task Selection
+2. Model I/O
+3. Model Selection
+4. Model Operations
+
+## Task Selection
+
+Switch between classification and segmentation tasks. The tasks will operate on the currently displayed Kind.
+
+## Model I/O
+
+### Local Model Loading
+
+
+
+
+
+
+
+**1. Model Types**
+
+TensorFlow models can use either the Layers format or the Graph format. We need to know which yours uses in order to load it correctly.
+
+**2. File Picker**
+
+Open a file picker and select the model files you want to upload. Piximi requires a `[model].json` description file as well as one or more `[model].weights.bin` file(s).
+
+Once you've confirmed the type and selected the files, click "Open Classification Model" to upload it.
+
+### Remote Model Loading
+
+
+
+
+
+
+
+**1. Model Types**
+
+TensorFlow models can use either the Layers format or the Graph format. We need to know which yours uses in order to load it correctly.
+
+**2. Model URL**
+
+Enter the URL for the remote model. If the model comes from TFHub (now absorbed into Kaggle), you must check the "From TFHub" box.
+
+Once you've confirmed the type and entered the URL, click "Open Classification Model" to load it.
+
+## Model Operations
+
+### Fit
+
+Clicking the _Fit_ button will open up a dialog displaying the configurable model settings.
+
+
+
+
+
+
+
+**1. Error Info**
+
+This error icon will appear to alert you to any issues that are currently blocking classification. You can view the issue by hovering over the icon.
+
+**2. Fit Classifier Button**
+
+When you are ready to train the classifier, click this button. The button will be replaced by a progress indicator depicting the number of epochs completed.
+
+#### Hyperparameters
+
+**3. Model Architecture**
+
+Piximi offers training a model using two different architectures:
+
+- _SimpleCNN_: A simple convolutional neural network consisting of a handful of processing layers.
+- _MobileNet_: Uses the popular MobileNet backbone for training the model.
+
+**4. Model Name**
+
+You can choose to change the name of the model before training. The name entered here will be the default filename when you save the classifier or the hyperparameters.
+
+**5. Hyperparameters**
+
+These settings control how your model learns from and infers on your data. Once a model is trained, these settings can no longer be updated; this keeps the trained model consistent with the configuration it was trained under.
+
+For a detailed description of each hyperparameter and how they affect training, go to the [](../technical/hyperparameters.md) section of this documentation.
+
+_Data Processing Settings_
+
+These settings control how your data is processed prior to its use in both training and inference. These can be split into two groups of operations: _Image Augmentation_ and _Data Partitioning_.
+
+- Image Augmentation: These settings control the size and shape of the images you want to feed to the model, as well as defining if or how you would like to crop them.
+- Data Partitioning: These settings control how your data is split between training and validation for model training purposes.
+
+_Optimization Settings_
+
+These settings dictate how the model will learn while it is being trained on your data, as well as the length of the training. They are broken up into two sections: _Training Strategy_ and _Optimization_.
+
+- Training Strategy: This section defines how long to train and how many images will be in each batch during training.
+- Optimization: This section defines how the model calculates loss, how it corrects itself, and how often it checks its progress. (A rough Keras analogue is sketched below.)
+
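+To make these groups concrete, here is a rough Keras analogue of what such settings control. This is purely illustrative; Piximi trains in the browser with TensorFlow.js, and all names and values below are hypothetical:
+
+```python
+# Sketch: a Keras analogue of the training settings (illustrative only).
+import numpy as np
+from tensorflow import keras
+
+# Dummy data standing in for your images (28x28 grayscale, 3 categories).
+x = np.random.rand(60, 28, 28, 1).astype("float32")
+y = keras.utils.to_categorical(np.random.randint(0, 3, 60), 3)
+
+model = keras.Sequential([
+    keras.layers.Conv2D(8, 3, activation="relu", input_shape=(28, 28, 1)),
+    keras.layers.MaxPooling2D(),
+    keras.layers.Flatten(),
+    keras.layers.Dense(3, activation="softmax"),
+])
+model.compile(
+    optimizer=keras.optimizers.Adam(learning_rate=1e-3),  # how the model corrects itself
+    loss="categorical_crossentropy",                      # how loss is calculated
+    metrics=["accuracy"],
+)
+model.fit(
+    x, y,
+    validation_split=0.25,  # data partitioning: fraction held out for validation
+    epochs=2,               # training strategy: how long to train
+    batch_size=16,          # training strategy: images per batch
+)
+```
+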
+**6. Export Hyperparameters**
+
+Click on the **Export Hyperparameters** button to download a `.json` file with the model's settings.
+
+#### History Plots
+
+Displays the model's performance from epoch to epoch. Monitoring the training history can help identify common model fitting difficulties such as overfitting.
+
+
+
+
+
+
+**1. Accuracy Plot**
+
+This shows the change in accuracy for the _training_ and _validation_ sets per epoch. Generally, higher is better.
+
+**2. Loss Plots**
+
+Shows the change in loss for the _training_ and _validation_ sets per epoch. Generally, lower is better.
+
+#### Model Summary
+
+Displays the model's summary, detailing each layer and a way to export this information.
+
+
+
+
+
+
+**1. Summary Table**
+
+For each layer in the model, displays:
+
+- Output shape
+- Number of parameters
+- Whether it's frozen (not trainable)
+
+**2. Export Summary**
+
+Exports the model summary as a `.csv` file.
+
+### Predict
+
+Clicking the _Predict_ button will begin running inference on the images/objects of the displayed Kind, using the selected model.
+
+
+
+
+
+
+**1. Hide Labeled Images**
+
+This will hide all of the manually categorized images so that you only see the predictions.
+
+**2. Clear Predictions**
+
+Reset the project to the state it was in before the predictions were made.
+
+**3. Accept Predictions**
+
+Confirm the predicted categories. Once accepted, you won't be able to revert to the state before they were categorized, so to prevent accidental acceptance, Piximi requires you to press and hold the button.
+
+### Evaluate
+
+Clicking the _Evaluate_ button will open a dialog with the evaluation result of the current run, along with the previous runs for which evaluation has been done.
+
+
+
+
+
+
+**1. Select Run**
+
+View the currently evaluated run, or select a previously evaluated run to view.
+
+**2. Confusion Matrix**
+
+Displays the results of the validation-set predictions. Ideally, you want it to resemble an _identity matrix_, where the values on the diagonal are maximized and the off-diagonal values are zero. This would mean that the model accurately predicted the classes of the validation set.
+
+**3. Evaluation Metrics**
+
+Displays some averaged evaluation metrics for the run (see the sketch after this list):
+
+- Accuracy
+- Cross Entropy
+- Precision
+- Recall
+- F-1 Score
+
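+All of these can be derived from the confusion matrix. A minimal sketch for the binary case (illustrative definitions):
+
+```python
+# Sketch: accuracy, precision, recall, and F1 from a 2x2 confusion
+# matrix. Rows = true class, columns = predicted class; class 1 is
+# treated as the "positive" class.
+import numpy as np
+
+cm = np.array([[50, 5],    # hypothetical counts
+               [10, 35]])
+tn, fp, fn, tp = cm.ravel()
+
+accuracy = (tp + tn) / cm.sum()
+precision = tp / (tp + fp)
+recall = tp / (tp + fn)
+f1 = 2 * precision * recall / (precision + recall)
+```
+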
+See our [image classification tutorial](../../pages/tutorials/classify-example-eukaryotic-image.md)
+and [object classification tutorial](../../pages/tutorials/create-cell-crops-with-cellprofiler.md) for usage information.
diff --git a/piximi-documentation/pages/detail/projectviewer-segmentation.md b/piximi-documentation/pages/detail/projectviewer-segmentation.md
new file mode 100644
index 0000000..f0afa8c
--- /dev/null
+++ b/piximi-documentation/pages/detail/projectviewer-segmentation.md
@@ -0,0 +1,71 @@
+# Segmentation
+
+The image segmentation module allows researchers to quickly identify cells or nuclei by selecting pre-trained segmentation models. Users can choose a model from the available options and apply it to images that are opened and selected.
+
+```{admonition} Cellpose: Remote Computation
+:class: warning
+
+All of the models run locally on your own device **EXCEPT** the Cellpose model. This is due to the difficulty of converting the Cellpose model, which uses many custom functions, to a usable TFJS model. To get around this, we connect to the [BioEngine](https://aicell.io/project/bioengine/) server, maintained by the [AICell Lab](https://aicell.io/), to run this model. The server may occasionally stop responding, in which case you will get an error during inference. If this happens, wait a little while for the server to restart and try again.
+```
+
+## Overview
+
+
+
+
+
+
+
+1. Task Selection
+2. Model I/O
+3. Model Description
+4. Model Operations
+
+## Task Selection
+
+Switch between classification and segmentation tasks. The tasks will operate on the currently displayed Kind.
+
+## Model I/O
+
+### Model Selection
+
+
+
+
+
+
+
+**1. Model Selection**
+
+Piximi provides several segmentation models to choose from:
+
+- Cellpose (remote)
+- Stardist (fluo, VHE)
+- Gland Segmentation
+- COCO-SSD
+
+More information about the models can be found in the [Segmentation Tutorial](../../pages/tutorials/segmentation-tutorial.md#load-models).
+
+**2. Model Details**
+
+Displays information about the model, its sources, the Kind output, and a potential use-case.
+
+## Model Description
+
+Displays the name of the currently selected model, as well as the name of the Kind of object it identifies.
+
+## Model Operations
+
+### Fit
+
+We are currently working on support for training a segmentation model.
+
+### Predict
+
+Clicking the _Predict_ button will begin running inference on the images/objects of the displayed Kind, using the selected model. A new Kind will be created and populated with the identified objects.
+
+### Evaluate
+
+Since we do not support segmentation model training at this time, there is nothing to evaluate.
+
+See our [Segmentation Tutorial](../../pages/tutorials/segmentation-tutorial.md) for usage information.
diff --git a/piximi-documentation/pages/detail/projectviewer.md b/piximi-documentation/pages/detail/projectviewer.md
new file mode 100644
index 0000000..a2bd969
--- /dev/null
+++ b/piximi-documentation/pages/detail/projectviewer.md
@@ -0,0 +1,208 @@
+# Project Viewer
+
+The Project Viewer is where you can create **New** projects, **open** previous projects and images or example projects, and **save** your current project.
+
+This view is also where you can **categorize** your images and perform **classification** and **segmentation** tasks.
+
+
+
+
+
+
+
+1. Top Appbar
+2. Action Drawer
+3. Image/Object Grid
+4. Info Bar
+
+## Top Appbar
+
+
+
+
+
+
+
+### 1. Project Name
+
+Update the name of your project. Saved filenames will default to the project name.
+
+### 2. Sort
+
+Choose the order in which your images/objects appear in the image grid. Options are by:
+
+- File Name
+- Category
+- Random
+- Image Name (The image name may be updated by the user. Default is the file name.)
+
+### 3. Grid Item Size
+
+Adjust the size of the images/objects displayed in the grid.
+
+### 4. Select/Deselect/Delete
+
+- Select all images/objects
+- Deselect all images/objects
+- Delete selected images/objects
+
+The number of selected images will be shown near the "Select All" button.
+
+```{admonition} Image Deletion
+:class: warning
+
+In the case that an image has associated annotations, deleting the image will also delete the annotations.
+```
+
+### 5. Categorize Images/Objects
+
+After selecting a subset of images/objects, you can use this dropdown menu to select an available category to apply to the selected images.
+
+### 6. Navigate to Image Viewer
+
+After selecting images or objects, navigate to the Image Viewer to view, annotate, or adjust the selected images. Selecting an object, or objects, and navigating to the Image Viewer will load the image the object belongs to, and highlight the object(s).
+
+### 7. Navigate to Measurements Viewer
+
+The Measurements Viewer operates on all images/objects in the project, so no selection is necessary.
+
+## Action Drawer
+
+
+
+
+
+
+
+### 1. File I/O
+
+**Project Creation**
+
+You can start a new project by clicking the **New** button. You will be prompted to input a new project name, and the current project will be replaced with a blank one.
+
+**Open a Project**
+
+You can load a previously saved project from either the `.zip` file or the `.zarr` file. As with creating a new project, the current project will be replaced.
+
+You can also choose to load one of our example projects. These are:
+
+- MNIST -- Small subset of the MNIST database of handwritten digits
+- _C. elegans_ -- Images of transgenic _C. elegans_ expressing the promoter of gene clec-60 fused to GFP.
+- Human U2OS-cells -- This image set is of a Transfluor assay where an orphan GPCR is stably integrated into the b-arrestin GFP expressing U2OS cell line.
+- Human U2OS-cells Cytoplasm Crops -- Images of cytoplasm to nucleus translocation of the Forkhead (FKHR-EGFP) fusion protein in stably transfected human osteosarcoma cells.
+- Human PLP1 Localization -- Human HeLa cells expressing the disease-associated variant of PLP1 protein, which localizes differently than the healthy version.
+- Malaria Infected Human Blood Smears -- Blood cells infected by P. vivax (malaria) and stained with Giemsa reagent.
+- U2OS Cell-Painting Experiment -- U2OS cells treated with an RNAi reagent and stained.
+
+**Upload Images**
+
+Open a file picker to select images you want to upload, or drag and drop them into the image grid. Piximi supports a variety of file types:
+
+- PNG
+- JPEG
+- TIFF
+- DICOM
+- BMP
+
+```{admonition} Image Channels
+:class: warning
+
+Currently, Piximi requires all images in a project to contain the same number of channels. In the case of 3D `.tiff` images, Piximi will load the file under the assumption that the number of channels equals that of the images in the project and will calculate the number of planes based on that assumption (e.g. a 12-slice `.tiff` loaded into a 3-channel project is interpreted as 4 planes).
+```
+
+**Save Project**
+
+Save your current project. Piximi saves the project as a compressed `.zip` file containing a `.zarr` file with the project data (images, objects, categories, measurements) as well as any trained classifiers used in the project.
+
+### 2. Learning Task
+
+This section contains the deep learning functionality of Piximi (Classification and Segmentation). From this section, users can upload and train classification models, select pretrained segmentation models, perform inference on the images and objects in the project, evaluate the performance of the classification models, and save trained models.
+
+More information about classification and segmentation can be found in their respective chapters.
+
+### 3. Categories
+
+This section contains the per-Kind categories in the project. You can create, edit, and delete categories here.
+
+When creating a new category, the category names must be unique within each kind, and you can select a category color from a pre-populated list.
+
+Each kind will contain an "Unknown" category which loaded images will default to.
+
+Deleting a category will recategorize the associated images as "Unknown".
+
+In addition to the "Categorize" button in the **Top App Bar**, users can recategorize selected images by holding down the `shift` key, entering the index of the desired category (0, 1, 2, etc.), then releasing the `shift` key.
+
+### 4. App Controls
+
+This section contains the app settings, functionality for reporting issues within the app to the GitHub project repo, and activation of the in-app help context.
+
+**App Settings**
+
+- Light/Dark Mode
+- Image selection border width/color
+- Sound Effects
+- Show image info when scrolling
+
+**Help Context**
+
+When activated, sections of the app which are associated with help information will be highlighted. Hovering over these sections will update the help dialog in the lower left of the screen with the relevant information. Hold down the `shift` key and click a section to lock the information dialog to that section.
+
+## Image/Object Grid
+
+
+
+
+
+
+
+### 1. Kind Tabs
+
+Piximi groups the project data into what we call "Kinds". A Kind is essentially a **supercategory**. Each project will have an "Image" kind, which refers to whole images and cannot be deleted or edited.
+
+Additional kinds can be created, edited, and deleted, and each kind has its own set of associated categories. For example, a project will contain a set of whole images (belonging to the "Image" kind). An image may then contain objects, such as "Nuclei" and "Cell Membrane" objects.
+
+In this example the project has three kinds -- "Image", "Nuclei", and "Cell Membrane". The objects can then be grouped by category; for example, the objects of kind "Nuclei" can be categorized as "Healthy" or "Infected".
+
+A simple structure could look like this:
+
+```
+kinds: {
+  Image: {
+    data: [...],
+    categories: [...]
+  },
+  Nuclei: {
+    data: [...],
+    categories: [Healthy, Infected, ...]
+  },
+  ...
+}
+```
+
+Each Kind tab contains functionality for editing the kind name, minimizing the kind (removing it from the visible tabs), and deleting the kind.
+
+```{admonition} Deleting Kinds
+:class: warning
+
+Deleting a Kind will also delete all associated objects or images.
+```
+
+### 2. Create/Show Kinds
+
+Use the "+" button to create new kinds. Additionally, any kind that was previously hidden can be restored from the dropdown menu that appears upon clicking the button.
+
+### 3. Images/Objects
+
+The main image grid displays all of the images or objects in a project. You can click on an image to select it, as well as view its category and some brief info.
+
+## Info Bar
+
+
+
+
+
+
+
+**1. Filter by category/training partition**
+
+**2. View image/object details**
diff --git a/piximi-documentation/PiximiConverter.ipynb b/piximi-documentation/pages/how-to/PiximiConverter.ipynb
similarity index 100%
rename from piximi-documentation/PiximiConverter.ipynb
rename to piximi-documentation/pages/how-to/PiximiConverter.ipynb
diff --git a/piximi-documentation/create-cell-crops-with-cellprofiler.md b/piximi-documentation/pages/how-to/create-cell-crops-with-cellprofiler.md
similarity index 81%
rename from piximi-documentation/create-cell-crops-with-cellprofiler.md
rename to piximi-documentation/pages/how-to/create-cell-crops-with-cellprofiler.md
index e13aabf..485ffc9 100644
--- a/piximi-documentation/create-cell-crops-with-cellprofiler.md
+++ b/piximi-documentation/pages/how-to/create-cell-crops-with-cellprofiler.md
@@ -4,34 +4,34 @@ While it is possible to segment/annotate and classify objects entirely within Pi
This guide explains how to create a CellProfiler pipeline that will first identify nuclei using the `IdentifyPrimaryObjects` module, followed by cell identification with the `IdentifySecondaryObjects` module. We will then create a multichannel RGB image with DNA colored blue and GFP colored green. Finally, these multichannel RGB images will be cropped based on the previously identified cell object using the `SaveCroppedObjects` module.
-The single cell image crops for the Piximi `Human U2OS-cells Cytoplasm Crops` example project were made using this workflow. The images are from the [BBBC013 cytoplasm-nucleus translocation dataset](https://bbbc.broadinstitute.org/BBBC013) from the Broad Bioimage Benchmark Collection. The CellProfiler pipeline used in the following example can be {download}`downloaded here ` for CellProfiler version **4.2.2**. For CellProfiler **4.2.1** or earlier, {download}`download this pipeline `.
+The single cell image crops for the Piximi `Human U2OS-cells Cytoplasm Crops` example project were made using this workflow. The images are from the [BBBC013 cytoplasm-nucleus translocation dataset](https://bbbc.broadinstitute.org/BBBC013) from the Broad Bioimage Benchmark Collection. The CellProfiler pipeline used in the following example can be {download}`downloaded here <../../downloads/BBBC013-translocation-crop-4.2.2.cppipe.zip>` for CellProfiler version **4.2.2**. For CellProfiler **4.2.1** or earlier, {download}`download this pipeline <../../downloads/BBBC013-translocation-crop-4.2.1.cppipe.zip>`.
## 1. Import images
To begin, drag and drop your images into the `Images` input module of CellProfiler.
-```{figure} ./img/user-guide-images-input-view.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-images-input-view.png
---
name: images-input-view
---
-Drag and drop your images into the `Images` input module.
+Drag and drop your images into the `Images` input module.
```
Next, select appropriate rules to categorize your files in the `NamesAndTypes` input module. In these images, files that contain `Channel2` in their filename are assigned the name `rawDNA` and images that contain `Channel1` in their filename are assigned the name `rawGFP`.
-```{figure} ./img/user-guide-names-and-types-view.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-names-and-types-view.png
:class: img-shadow
---
name: NamesAndTypes-view
---
-Within the `NamesAndTypes` module, assign appropriate names for the DNA and GFP channels.
+Within the `NamesAndTypes` module, assign appropriate names for the DNA and GFP channels.
```
## 2. IdentifyPrimaryObjects
Then, add an `IdentifyPrimaryObjects` module and set your DNA image (e.g. `rawDNA`) as the input image. Name this primary object `Nuclei`. Adjust the parameters so an appropriate segmentation is achieved while using test mode.
-```{figure} ./img/user-guide-identify-primary-object-view.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-identify-primary-object-view.png
---
name: IdentifyPrimaryObjects-view
---
@@ -42,7 +42,7 @@ Add a IdentifyPrimaryObjects module and adjust the parameters to achieve adequat
Add an `IdentifySecondaryObjects` module and select the cell image (e.g. `rawGFP`) as the input image and `Nuclei` as the input objects. Name this Secondary object `Cells`. You can tune segmentation parameters to identify your cell objects or set the `Distance - N` method to identify secondary objects with a distance that captures the edge of most cells (e.g. `10`).
-```{figure} ./img/user-guide-identify-secondary-object-view.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-identify-secondary-object-view.png
---
name: IdentifySecondaryObjects-view
---
@@ -53,7 +53,7 @@ Add a IdentifySecondaryObjects module using the `rawGFP` as an input image and `
Now, we will create a multichannel RGB image using the input `rawDNA` and `rawGFP` images. Add a `GrayToColor` module and select `rawGFP` to be colored green and `rawDNA` to be colored blue. Name the output image `GFPandDNA`.
-```{figure} ./img/user-guide-color-to-gray-view.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-color-to-gray-view.png
---
name: ColorToGray-view
---
@@ -68,16 +68,16 @@ Next, add a `SaveCroppedObjects` module and select the `Cells` objects from `Ide
:class: seealso, dropdown
-CellProfiler versions 4.2.1 and earlier can also be adjusted to save cropped images into subdirectories named after the input filename. This can be achieved by enabling metadata extraction and using the extracted filename in the subfolder output path in SaveCroppedObjects. Below are the changes to the above mentioned pipeline to achieve this result. The above pipeline that has been modified for CellProfiler 4.2.1 can be {download}`downloaded here `.
+CellProfiler versions 4.2.1 and earlier can also be adjusted to save cropped images into subdirectories named after the input filename. This can be achieved by enabling metadata extraction and using the extracted filename in the subfolder output path in SaveCroppedObjects. Below are the changes to the above mentioned pipeline to achieve this result. The above pipeline that has been modified for CellProfiler 4.2.1 can be {download}`downloaded here <../../downloads/BBBC013-translocation-crop-4.2.1.cppipe.zip>`.
**1. Enable metadata extraction**
In the `Metadata` input module, select `Yes` on the `Extract Metadata` option. In the field marked `Regular expression to extract from file name`, input the regular expression `\-(?P<FileName>.*)\.`. This will extract the information after the first `-` and before the last `.` from the image filenames in the `BBBC013` dataset.
-*Why extract metadata for this portion of the filename?*
+*Why extract metadata for this portion of the filename?*
For each image there are two individual filenames, representing either `channel1` or `channel2`. If you choose to extract the entire filename, `SaveCroppedObjects` will be unable to reconcile which filename to use and instead use `None`. By using the regular expression mentioned above, the extracted filename will be the same across the two channels.
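+
+A quick way to sanity-check the regular expression is to run it in Python. The filename below is hypothetical but follows the `BBBC013` naming pattern:
+
+```python
+# Sketch: what the metadata regex extracts from a channel filename.
+import re
+
+pattern = r"\-(?P<FileName>.*)\."
+match = re.search(pattern, "Channel1-01-A-01.tif")  # hypothetical filename
+print(match.group("FileName"))  # -> "01-A-01", identical for both channels
+```
+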
-```{figure} ./img/user-guide-metadata-view-4.2.1.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-metadata-view-4.2.1.png
---
name: metadata-view-4.2.1
---
@@ -88,7 +88,7 @@ Use the regular expression `\-(?P<FileName>.*)\.` to extract an appropriate file
Within `SaveCroppedObjects`, select `Default Output Folder sub-folder` and then **right-click** in the `sub-folder` text box and select `FileName`.
-```{figure} ./img/user-guide-save-cropped-objects-view-4.2.1.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-save-cropped-objects-view-4.2.1.png
---
name: SaveCroppedObjects-view-4.2.1
---
@@ -99,7 +99,7 @@ Right click within the `Sub-folder` text box and select `FileName`, as defined i
````
-```{figure} ./img/user-guide-save-cropped-objects-view.png
+```{figure} ../../img/cellprofiler-cell-crops-examples/user-guide-save-cropped-objects-view.png
---
name: SaveCroppedObjects-view
---
diff --git a/piximi-documentation/citing-piximi.md b/piximi-documentation/pages/technical/citing-piximi.md
similarity index 100%
rename from piximi-documentation/citing-piximi.md
rename to piximi-documentation/pages/technical/citing-piximi.md
diff --git a/piximi-documentation/example-datasets.md b/piximi-documentation/pages/technical/example-datasets.md
similarity index 90%
rename from piximi-documentation/example-datasets.md
rename to piximi-documentation/pages/technical/example-datasets.md
index cf6b664..260a8fe 100644
--- a/piximi-documentation/example-datasets.md
+++ b/piximi-documentation/pages/technical/example-datasets.md
@@ -9,7 +9,7 @@ If you use any of the example datasets in a publication, please also cite the or
`````{grid}
````{grid-item}
:columns: 3
-```{image} img/example_project_icons/mnistExampleProjectIcon.png
+```{image} ../../img/example_project_icons/mnistExampleProjectIcon.png
:alt: mnistExampleIcon
:align: left
```
@@ -26,12 +26,12 @@ MNIST citation doi: 10.1109/5.726791
````
`````
-## *C. elegans*
+## _C. elegans_
`````{grid}
````{grid-item}
:columns: 3
-```{image} img/example_project_icons/cElegansExampleProjectIcon.png
+```{image} ../../img/example_project_icons/cElegansExampleProjectIcon.png
:alt: cElegansExampleIcon
:align: left
```
@@ -55,7 +55,7 @@ The *C. elegans* data were provided by Javier Irazoqui as BBBC012 in the [Broad
`````{grid}
````{grid-item}
:columns: 3
-```{image} img/example_project_icons/humanU2OSCellsExampleProjectIcon.png
+```{image} ../../img/example_project_icons/humanU2OSCellsExampleProjectIcon.png
:alt: humanU2OSCellsExampleIcon
:align: left
```
@@ -77,7 +77,7 @@ U2OS citation doi: 10.1038/nmeth.2083
`````{grid}
````{grid-item}
:columns: 3
-```{image} img/example_project_icons/BBBC013ExampleProjectIcon.png
+```{image} ../../img/example_project_icons/BBBC013ExampleProjectIcon.png
:alt: BBBC013ExampleIcon
:align: left
```
@@ -100,14 +100,14 @@ U2OS citation doi: 10.1038/nmeth.2083
`````{grid}
````{grid-item}
:columns: 3
-```{image} img/example_project_icons/PLP1ExampleProjectIcon.png
+```{image} ../../img/example_project_icons/PLP1ExampleProjectIcon.png
:alt: PLP1ExampleIcon
:align: left
```
````
````{grid-item}
:columns: 9
-These human HeLa cells express either wild type or the disease-associated variant of PLP1 protein, which localizes differently than the healthy version.
+These human HeLa cells express either wild type or the disease-associated variant of PLP1 protein, which localizes differently than the healthy version.
Channel 1 is artifacts, channel 2 is fluorescently tagged protein PLP1, and channel 3 is DNA.
The human PLP1 localization data were provided by Jessica Lacoste in [Mikko Taipale's lab](http://taipalelab.org/) at University of Toronto. \
diff --git a/piximi-documentation/hyperparameters.md b/piximi-documentation/pages/technical/hyperparameters.md
similarity index 100%
rename from piximi-documentation/hyperparameters.md
rename to piximi-documentation/pages/technical/hyperparameters.md
diff --git a/piximi-documentation/technical-faq.md b/piximi-documentation/pages/technical/technical-faq.md
similarity index 75%
rename from piximi-documentation/technical-faq.md
rename to piximi-documentation/pages/technical/technical-faq.md
index 6bf34d2..1380bf9 100644
--- a/piximi-documentation/technical-faq.md
+++ b/piximi-documentation/pages/technical/technical-faq.md
@@ -10,39 +10,41 @@
- [If I run Piximi multiple times, why do I get different training results?](run-model-multiple-times)
(if-piximi-crashes)=
+
## If Piximi crashes, how can I recover my project?
Currently, there is no mechanism to auto-save work. It is highly recommended to manually save work periodically as you go.
The "Save project" option will save the entire state of the project, including all images and annotations made on them, and model settings (preprocessing, architecture, optimization, and dataset settings), but not including the trained model weights.
-
+
-
+
To save the trained model weights, use the "Save classifier" option.
-
+
-
+
If Piximi crashes, reload your work by using the "Open project" option to load images and project settings.
-
+
-
+
Use the "Open classifier" option to load a trained model and its parameters.
-
+
Make sure to select both the weights (model parameters) bin file and json (model architecture) file.
-
+
-
+
(can-i-run-piximi-offline)=
+
## Can I run Piximi offline?
Yes. Once you visit the application, there is no need for an internet connection so long as you do not close or refresh the tab. If you close or refresh the tab, you will need an internet connection to reload Piximi.
@@ -54,11 +56,13 @@ Segmentation models do require an internet connection and certain segmentation m
Segmentation models that transmit data over the internet are clearly indicated.
(is-there-logging)=
+
## Is there logging?
No. Piximi does not log any information, perform any telemetry, or make any external API calls.
(what-models-are-used)=
+
## What classifier models are used?
SimpleCNN, which uses 2 convolutional layers, 2 max pooling layers, and 1 dense layer. All layers are initialized with random weights and the entire model is trained.
@@ -66,6 +70,7 @@ SimpleCNN, which uses 2 convolutional layers, 2 max pooling layers, and 1 dense
[MobileNetV1](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md) (specifically, `MobileNet_v1_0.25_224`), which is a model pre-trained for image classification. Only the final convolutional layer's parameters are modified during training, the rest of the parameters in the model are frozen.
(what-if-internet-is-lost)=
+
## What if internet connection is lost while a model is training?
Once Piximi is loaded, no internet connection is necessary. You may keep working, save your project and model, and load previously saved projects and models. If you close the tab containing Piximi, or hit refresh on the browser, you will need an internet connection to reload Piximi.
@@ -73,21 +78,23 @@ Once Piximi is loaded, no internet connection is necessary. You may keep working
No internet connection is necessary to save or load projects and models.
(see-a-training-summary)=
+
## Is it possible to see a training summary?
Yes. The model summary, accuracy and loss are displayed in the Classifier dialog, and will remain there even if you leave the dialog and re-enter.
-
+
Additional metrics are available via the Evaluation dialog.
-
+
-
+
If a new model is trained however, or if the current model is re-trained, these will be lost. To avoid this, save the current model before performing any additional training.
(does-piximi-use-a-gpu)=
+
## Does Piximi use a GPU?
Yes. Piximi uses Tensorflow.js which in turn uses [WebGL](https://en.wikipedia.org/wiki/WebGL).
@@ -95,8 +102,9 @@ Yes. Piximi uses Tensorflow.js which in turn uses [WebGL](https://en.wikipedia.o
If using Chrome, users will need to enable GPU use by going into preferences -> advanced -> system, and enabling the "Use hardware acceleration when available" option.
(run-model-multiple-times)=
+
## If I run the same model multiple times, why do I get different training results?
-It is possible to get different training results when training on the same data. There are two major reasons for this: the first is a result of the random validation dataset that is selected by Piximi when you press  `Fit Classifier`. For example, the first time you fit the classifier, images 1, 2 and 3 may be selected for the validation dataset. A second, and identical, run of fit classifier might then select images 4, 5 and 6 as your validation dataset, which look different to the images selected in the first run. Validation image selection is random so that the model performance can be evaluated independently of the images selected for validation. Even if your validation data set were identical, however, your results may still end up slightly different run-to-run due to certain steps in the training process that draw on random numbers and/or shuffle the data; we do not currently but may in the future provide ways to stabilize these parameters across runs.
+It is possible to get different training results when training on the same data. There are two major reasons for this. The first is the random validation dataset that is selected by Piximi when you press `Fit Classifier`. For example, the first time you fit the classifier, images 1, 2 and 3 may be selected for the validation dataset. A second, and identical, run of fit classifier might then select images 4, 5 and 6 as your validation dataset, which look different to the images selected in the first run. Validation image selection is random so that the model performance can be evaluated independently of the images selected for validation. Even if your validation dataset were identical, however, your results may still end up slightly different run-to-run due to certain steps in the training process that draw on random numbers and/or shuffle the data; we do not currently provide ways to stabilize these parameters across runs, but may in the future.
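+
+The sketch below illustrates the first source of variation: a random validation split selects different images on each run unless a seed is fixed (illustrative only; Piximi does not currently expose such a seed):
+
+```python
+# Sketch: random validation selection differs between runs.
+import random
+
+images = list(range(1, 11))  # image IDs 1..10
+
+for run in (1, 2):
+    validation = sorted(random.sample(images, 3))  # e.g. [1, 2, 3], then [4, 5, 6]
+    print(f"run {run}: validation set = {validation}")
+
+# Fixing a seed would make the split repeatable run-to-run:
+random.seed(42)
+print(sorted(random.sample(images, 3)))
+```
+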
Given these considerations, please do save your models frequently if you think they are performing well - you can always delete an old model later, but a model is not guaranteed to be reproducible if it hasn't been saved!
diff --git a/piximi-documentation/work-in-progress.md b/piximi-documentation/pages/technical/work-in-progress.md
similarity index 100%
rename from piximi-documentation/work-in-progress.md
rename to piximi-documentation/pages/technical/work-in-progress.md
diff --git a/piximi-documentation/classify-example-eukaryotic-image.md b/piximi-documentation/pages/tutorial/classify-example-eukaryotic-image.md
similarity index 56%
rename from piximi-documentation/classify-example-eukaryotic-image.md
rename to piximi-documentation/pages/tutorial/classify-example-eukaryotic-image.md
index 1eb2eee..0f3d85c 100644
--- a/piximi-documentation/classify-example-eukaryotic-image.md
+++ b/piximi-documentation/pages/tutorial/classify-example-eukaryotic-image.md
@@ -1,12 +1,12 @@
# Image Classification
-## 1. Load images
-To begin, we will load the images from an example dataset included in Piximi by pressing  `Open` in the top left. Select `Open` > `Project` > `Example Project` > `Human U2OS-cells example project` to get started. Alternatively, if you would like to load your own images, go to `Open` > `Image` > `New Image`.
+## 1. Load images
-The images correspond to U2OS cells co-expressing arrestin-GFP and an orphan GPCR. Upon receptor stimulation arrestin-GFP is recruited to the plasma membrane and eventually endocytosed resulting in vesicle like structures.
+To begin, we will load the images from an example dataset included in Piximi by pressing `Open` in the top left. Select `Open` > `Project` > `Example Project` > `Human U2OS-cells example project` to get started. Alternatively, if you would like to load your own images, go to `Open` > `Image` > `New Image`.
+The images correspond to U2OS cells co-expressing arrestin-GFP and an orphan GPCR. Upon receptor stimulation arrestin-GFP is recruited to the plasma membrane and eventually endocytosed resulting in vesicle like structures.
-```{figure} ./img/user-guide-open-img.png
+```{figure} ../../img/eukaryotic-classification/load-example-light.webp
---
name: open-img
---
@@ -15,24 +15,25 @@ Open the U2OS example dataset
## 2. Categorize images
-In the `Categories` sub-menu on the left hand side you can see that there are already 3 classes defined for the U2OS example project. To turn on/off the display of images under a given label, click on the  filters icon on the right hand panel, and toggle the label of interest under `By category`. The classes are:
+In the `Categories` sub-menu on the left hand side you can see that there are already 3 classes defined for the U2OS example project. To turn on/off the display of images under a given label, click on the  filters icon on the right hand panel, and toggle the label of interest under `By category`. The classes are:
+
- Unknown
- This represents the uncategorized images. Piximi will predict which class these images belong to later
- Positive Control (vesicular GFP)
- Negative Control (cytoplasmic GFP)
-```{figure} ./img/user-guide-u2os-label-highlight.png
+```{figure} ../../img/eukaryotic-classification/human-u20s-category-light.webp
---
name: u2os-labels
---
-Explore the category menu. Turn on/off (/) a particular category to show/hide only those images.
+Explore the category menu. Turn a particular category on/off to show/hide only those images.
```
-
-Ensure no images are currently selected by clicking the  `Deselect` icon. Then, single-click to select 2-3 images from the unknown category that best fit the `Negative Control` category. Once selected, click `Categorize` in the top right and select `Negative Control`. Do the same for 2-3 `Positive Control` images.
+Ensure no images are currently selected by clicking the  `Deselect` icon. Then, single-click to select 2-3 images from the unknown category that best fit the `Negative Control` category. Once selected, click `Categorize` in the top right and select `Negative Control`. Do the same for 2-3 `Positive Control` images.
+
```{math}
:label: accuracy_equation
Accuracy = \frac{\text{Number of correct predictions}}{\text{Total number of predictions}}
```
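+
+For example, if the classifier labels 45 of 50 images correctly, its accuracy is 45/50 = 0.9, i.e. 90% (illustrative numbers only).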
-`Validation Accuracy` is the accuracy when the model examines the **validation** subset of the data.
+`Validation Accuracy` is the accuracy when the model examines the **validation** subset of the data.
```{admonition} Validation accuracy vs accuracy
:class: tip, dropdown
@@ -93,11 +95,11 @@ If you notice that your `Validation accuracy` value decreases as epochs increase
This is a result of **overfitting** as your model begins to pick up features within your image, such as noise, that are not relevant to classification. In essence, overfitting is when the model memorizes the answer to a specific question, rather than determining the answer from scratch itself.
```
-Loss is another metric that is calculated on the training and validation subsets of data and are depicted as loss and validation loss, respectively. Loss represents a summation of the errors the model has made during classification.
+Loss is another metric calculated on the training and validation subsets of the data, depicted as loss and validation loss, respectively. Loss represents a summation of the errors the model has made during classification.
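+
+The exact loss function depends on the classifier settings, but a common choice for multi-class classification, shown here purely as an illustrative formula, is categorical cross-entropy, which heavily penalizes confident wrong predictions:
+
+```{math}
+Loss = -\sum_{i=1}^{C} y_i \log(\hat{y}_i)
+```
+
+where *C* is the number of classes, *yᵢ* is 1 for the true class and 0 otherwise, and *ŷᵢ* is the predicted probability for class *i*.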
-You can now exit the `Fit Model` settings by clicking the  in the top left of the dialog.
+You can now exit the `Fit Model` settings by clicking the close button in the top left of the dialog.
-```{figure} ./img/user-guide-exit-fit.png
+```{figure} ../../img/eukaryotic-classification/human-u20s-fit-dialog-exit-light.webp
---
name: fit-exit
---
@@ -114,21 +116,20 @@ Piximi does not currently have a hold-out test-like set.
## 4. Predict classes for unlabelled data
-Once your model has been trained you can click  `Evaluate` to see in-depth metrics on how well the model performed. You can then click  `Predict` to run the trained model on the unannotated data. Once an image has been classified you will see the  color on the image thumbnail update to that particular class. At this stage, you may inspect the predicted classes and either accept the predictions by clicking and holding  `Accept Predictions`or reject them by clicking  `Clear Predictions`. Depending on the performance of the model, annotating further images based on the predictions and/or adjusting the `Fit Model` settings may be desired.
+Once your model has been trained you can click  `Evaluate` to see in-depth metrics on how well the model performed. You can then click  `Predict` to run the trained model on the unannotated data. Once an image has been classified, you will see the color on the image thumbnail update to that particular class. At this stage, you may inspect the predicted classes and either accept the predictions by clicking and holding  `Accept Predictions` or reject them by clicking  `Clear Predictions`. Depending on the performance of the model, annotating further images based on the predictions and/or adjusting the `Fit Model` settings may be desired.
-```{figure} ./img/user-guide-u2os-run-predict.png
+```{figure} ../../img/eukaryotic-classification/human-u20s-predict-light.webp
---
name: run-predict
---
Predict the class of unknown images using your trained model.
```
-
+
+````-->
```{admonition} Copyright
:class: seealso
The [BBBC016](https://bbbc.broadinstitute.org/BBBC016) images used here are licensed under a [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/) by Ilya Ravkin.
```
-
-
diff --git a/piximi-documentation/measurements.md b/piximi-documentation/pages/tutorial/creating-measurements.md
similarity index 75%
rename from piximi-documentation/measurements.md
rename to piximi-documentation/pages/tutorial/creating-measurements.md
index 54f1cdd..d72f2c0 100644
--- a/piximi-documentation/measurements.md
+++ b/piximi-documentation/pages/tutorial/creating-measurements.md
@@ -1,21 +1,21 @@
-# Measurements
+# Creating Measurements
-While sometimes, the goal of an experiment is just to find objects or to classify images and/or objects, other times, we want to go deeper and learn more about the images and/or objects we care about. This is where *Measurements* come in.
+While sometimes, the goal of an experiment is just to find objects or to classify images and/or objects, other times, we want to go deeper and learn more about the images and/or objects we care about. This is where _Measurements_ come in.
Right now, Piximi supports two broad classes of measurements - intensity measurements, which work for images OR objects, and shape/geometry measurements, which work for objects alone. More categories of measurements will be rolling out in time - stay tuned!
-Measurements are available from the  icon in the top bar of Piximi.
+Measurements are available from the ruler icon in the top bar of Piximi.
-Measurements are created in individual tables - each table is specific to one "kind" - either images or a type of segmented object (such as cells). You can create as many tables as you like.
-Each table has measurements broken down by *Splits* - essentially, what are the different conditions in this experiment (if any) and how should we parse them? Each table has its own set of splits, which you can adjust at any time.
+Measurements are created in individual tables - each table is specific to one "kind" - either images or a type of segmented object (such as cells). You can create as many tables as you like.
+Each table has measurements broken down by _Splits_ - essentially, what are the different conditions in this experiment (if any) and how should we parse them? Each table has its own set of splits, which you can adjust at any time.
-# Creating Measurements in Piximi
+# Create Measurements in Piximi
-1. Click the [Measurements](./icons/ruler-icon.svg) icon in the top bar of Piximi.
+1. Click the [Measurements](../../icons/ruler-icon.svg) icon in the top bar of Piximi.
2. Click the `+` button next to the word `Tables` to create a new measurement table. You can make as many tables as you like, to express different images or objects, broken down by different subsets.
-```{figure} ./img/measurement-walkthrough/measure-00-pre-measurement.png
+```{figure} ../../img/measurement-walkthrough/measure-00-pre-measurement.png
---
name: measurement-00-table-plus
scale: 50%
@@ -25,7 +25,7 @@ The `+` button at the top of the Tables menu allows you to add a new table
3. Select a `kind` you'd like to make measurements of. Reminder, `kind`s include the images, as well as any objects you created via segmentation or annotation.
-```{figure} ./img/measurement-walkthrough/measure-01-create-table.png
+```{figure} ../../img/measurement-walkthrough/measure-01-create-table.png
---
name: measure-01-create-table
scale: 50%
@@ -35,7 +35,7 @@ Select what kind of measurement table you'd like to make
4. Piximi will begin to calculate metrics in the background. Depending on the number of examples present in that `kind`, this may take a few minutes - an indicator will show you the progress made.
-```{figure} ./img/measurement-walkthrough/measure-02-spin-while-creating-table.png
+```{figure} ../../img/measurement-walkthrough/measure-02-spin-while-creating-table.png
---
name: measure-02-spin-while-creating-table
scale: 50%
@@ -45,7 +45,7 @@ The circular indicator next to `Tables` indicates how far through measurement ge
5. Once the initial pre-calculations are done, you can select which measurements you'd like to make, as well as how you'd like to subset ("split") the data (currently by `Category` or by `Partition`; if you only have one category, that's fine, but you do have to click something here!). As before, this will take some time, so just leave the Piximi tab open and the circular indicator will let you know how long this is going to take.
-```{figure} ./img/measurement-walkthrough/measure-03-select-splits-and-measurements.png
+```{figure} ../../img/measurement-walkthrough/measure-03-select-splits-and-measurements.png
---
name: measure-03-select-splits-and-measurements
scale: 50%
@@ -55,7 +55,7 @@ Select the split or splits you want to measure for each table, as well as the me
6. Once your measurements have been generated, they will appear in the main area in a data grid! In the grid, you can see the measurement name, the subset (or `Split`) of the data represented, and the Mean, Median, and Standard deviation of the data for that feature in that subset.
-```{figure} ./img/measurement-walkthrough/measure-04-data-grid.png
+```{figure} ../../img/measurement-walkthrough/measure-04-data-grid.png
---
name: measure-04-data-grid
---
@@ -64,7 +64,7 @@ The Piximi data grid
7. If you'd like to plot data in Piximi, simply navigate to the data grid containing the data you want to plot and hit the Plots tab at the top. You can make as many plots as you would like by hitting the `+` button on the plots tab. Plots can also be exported by saving to PNG. Piximi includes several plot types, including scatter plots, swarm plots, and histograms.
-```{figure} ./img/measurement-walkthrough/measure-05-scatter.png
+```{figure} ../../img/measurement-walkthrough/measure-05-scatter.png
---
name: measure-05-scatter
scale: 50%
@@ -72,7 +72,7 @@ scale: 50%
Scatter plots generated in Piximi can have X, Y, and Size measurements selected; they can also show colors according to the selected splits (with many colormaps to choose from). Note the "Save to PNG" button in the bottom right!
```
-```{figure} ./img/measurement-walkthrough/measure-06a-swarm-no-category.png
+```{figure} ../../img/measurement-walkthrough/measure-06a-swarm-no-category.png
---
name: measure-06a-swarms
scale: 50%
@@ -80,7 +80,7 @@ scale: 50%
Swarm plots can be shown on their own...
```
-```{figure} ./img/measurement-walkthrough/measure-06b-swarm-with-category.png
+```{figure} ../../img/measurement-walkthrough/measure-06b-swarm-with-category.png
---
name: measure-06b-swarms
scale: 50%
@@ -88,7 +88,7 @@ scale: 50%
... or with a summary boxplot overlaid.
```
-```{figure} ./img/measurement-walkthrough/measure-07-histogram.png
+```{figure} ../../img/measurement-walkthrough/measure-07-histogram.png
---
name: measure-07-histogram
scale: 50%
@@ -96,9 +96,9 @@ scale: 50%
You can choose to represent your data as a histogram, with a custom number of bins.
```
-If you want to do further analyses of your data (and you should)!, you can export your data tables from the measurements menu - this will let you do whatever downstream analysis you like.
+If you want to do further analyses of your data (and you should!), you can export your data tables from the measurements menu - this will let you do whatever downstream analysis you like.
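+
+As a minimal sketch of such downstream analysis (assuming you exported a table to a file named `measurements.csv`; the actual filename and column names depend on your project), the table loads directly into pandas:
+
+```python
+import pandas as pd
+
+# Hypothetical filename; use whatever you exported from the measurements menu
+df = pd.read_csv("measurements.csv")
+
+# Inspect the available measurements and splits
+print(df.columns.tolist())
+print(df.head())
+```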
-```{figure} ./img/measurement-walkthrough/measure-08-export-measurements.png
+```{figure} ../../img/measurement-walkthrough/measure-08-export-measurements.png
---
name: measure-08-export-measurements
scale: 50%
diff --git a/piximi-documentation/segmentation.md b/piximi-documentation/pages/tutorial/segmentation-tutorial.md
similarity index 52%
rename from piximi-documentation/segmentation.md
rename to piximi-documentation/pages/tutorial/segmentation-tutorial.md
index 451b99c..f6c2650 100644
--- a/piximi-documentation/segmentation.md
+++ b/piximi-documentation/pages/tutorial/segmentation-tutorial.md
@@ -1,15 +1,13 @@
-# Segmentation
-
# Image Segmentation
The image segmentation module allows researchers to quickly identify cells or nuclei by selecting pre-trained segmentation models. Users can choose a model from the available options and apply it to images that are opened and selected in Piximi for inference.
## 1. Load images
-To begin, loading the images from an example dataset included in Piximi by pressing  `Open` in the top left. Select `Image` > `example image` > `U2OS cell-painting experiment ` to get started. Alternatively, if you would like to load your own images, press  `Open image` in the top right.
+To begin, load the images from an example dataset included in Piximi by pressing  `Open` in the top left. Select `Image` > `example image` > `U2OS cell-painting experiment` to get started. Alternatively, if you would like to load your own images, press  `Open image` in the top right.
The images correspond to U2OS cells treated with an RNAi reagent
-(https://portals.broadinstitute.org/gpp/public/clone/details?cloneld=TRCN0000195467) and stained for a cell-painting experiment.
+(https://portals.broadinstitute.org/gpp/public/clone/details?cloneld=TRCN0000195467) and stained for a cell-painting experiment.
## 2. Load Models
@@ -17,55 +15,65 @@ Currently, the annotator provides five pre-trained models, each designed for spe
- **Cellpose**: A generalist algorithm for cellular segmentation, trained on fluorescence images
- **Stardistfluo**: Trained on fluorescence images, ideal for identifying objects with star-convex shapes
-- **StardistVHE**: To identify nuclei in hematoxylin and eosin (H&E) stained image.
+- **StardistVHE**: To identify nuclei in hematoxylin and eosin (H&E) stained images.
- **COCO-SSD**: To identify objects in “natural images” (or photographs) of 80 different classes (such as humans and kites) using the COCO format
- **GlandSegmentation**: To segment intestinal glands, trained on the Gland Segmentation in Colon Histology Images Challenge Contest (GlaS)
In the 'Learning task' sub-menu on the left-hand side, click the 'Segmentation' button to switch the classification to cell segmentation.
+
-*Segmentation interface*
+_Segmentation interface_
-Then, click the '+ Load Model' button to select a model. Currently, only the pre-trained model is available for selection.
+Then, click the '+ Load Model' button to select a model. Currently, only pre-trained models are available for selection.
+
-*Segmentation model selection*
+_Segmentation model selection_
-Note: Cellpose is currently unique in that it runs on the AI4Life project’s BioEngine30 server while StarDist, like other Piximi models, runs client-only in the user’s own browser without data leaving their machine.
+
+Note: Cellpose is currently unique in that it runs on the AI4Life project’s BioEngine server, while StarDist, like other Piximi models, runs client-only in the user’s own browser without data leaving their machine.
Stay tuned for the 'upload local' and 'fetch remote' features!
## 3. Run the model
-click on  'predict model' to run the model you selected for the image selected for segmentation
+
+Click  'Predict Model' to run the model you selected on the image selected for segmentation.
-
-*Model prediction*
+
-
+_Model prediction_
+
## 4. Segmentation output
-In this example, we use the Cellpose-Cell model to segment cells in the image. To view the segmented cells, switch the channels from 'Image' to 'Cellpose-Cell'  at the top center of the interface.
+In this example, we use the Cellpose-Cell model to segment cells in the image. To view the segmented cells, switch the channels from 'Image' to 'Cellpose-Cell'  at the top center of the interface.
-
-*Examples of segmentation output*
+
+_Examples of segmentation output_
diff --git a/piximi-documentation/pages/tutorial/translocation_tutorial.md b/piximi-documentation/pages/tutorial/translocation_tutorial.md
new file mode 100644
index 0000000..d593157
--- /dev/null
+++ b/piximi-documentation/pages/tutorial/translocation_tutorial.md
@@ -0,0 +1,379 @@
+# Piximi beginner tutorial (ENGLISH)
+
+> **Installation-free segmentation and classification in the browser**
+>
+> Beth Cimini, Le Liu, Esteban Miglietta, Paula Llanos, Nodar Gogoberidze
+>
+> Broad Institute of MIT and Harvard, Cambridge, MA.
+
+### **Background information:**
+
+#### **What is Piximi?**
+
+Piximi is a modern, no-programming image analysis tool leveraging deep learning. Implemented as a web application at [https://piximi.app/](https://piximi.app/), Piximi requires no installation and can be accessed by any modern web browser. Its client-only architecture preserves the security of researcher data by running all computation locally\*.
+
+Piximi is interoperable with existing tools and workflows by supporting import and export of common data and model formats. The intuitive interface and easy access to Piximi allows biological researchers to obtain insights into images within just a few minutes. Piximi aims to bring deep learning-powered image analysis to a broader community by eliminating barriers to entry.
+
+\* except for the segmentations using Cellpose, which are sent to a remote server (with the permission of the user).
+
+Core functionalities: **Annotator, Segmentor, Classifier, Measurements.**
+
+#### **Goal of the exercise**
+
+In this exercise, you will familiarize yourself with Piximi’s main functionalities of annotation, segmentation, classification, measurement and visualization and use it to analyze a sample image dataset from a translocation experiment. The goal of this experiment is to determine the **lowest effective dose** of Wortmannin required to induce GFP-tagged FOXO1A nuclear localization (Figure 1). You will segment the images using one of the deep learning models available in Piximi, check and curate the segmentation, then train an image classifier to classify the individual cells as having “nuclear-GFP”, “cytoplasmic-GFP” or “no-GFP”. Finally, you will make measurements and plot them to answer the biological question.
+
+#### **Context of the sample experiment**
+
+In this experiment, researchers imaged fixed U2OS osteosarcoma (bone cancer) cells expressing a FOXO1A-GFP fusion protein and stained with DAPI to label the nuclei. FOXO1 is a transcription factor that plays a key role in regulating gluconeogenesis and glycogenolysis through insulin signaling. FOXO1A dynamically shuttles between the cytoplasm and nucleus in response to various stimuli. Wortmannin, a PI3K inhibitor, can block nuclear export, resulting in the accumulation of FOXO1A in the nucleus.
+
+_Schematic representation of FOXO1A mechanism_
+
+
+
+#### **Materials necessary for this exercise**
+
+The materials needed in this exercise can be downloaded from: [PiximiTutorial](./downloads/Piximi_Translocation_Tutorial_RGB.zip). The “Piximi Translocation Tutorial RGB.zip” file contains a Piximi project, including all the images, already labeled with the corresponding treatment (Wortmannin concentration or Control). Download this file but **do NOT unzip it**!
+
+#### **Exercise instructions**
+
+Read through the steps below and follow instructions where stated. Steps where you must figure out a solution are marked with 🔴 TO DO.
+
+##### 1. **Load the Piximi project**
+
+🔴 TO DO
+
+- Start Piximi by going to: [https://piximi.app/](https://piximi.app/)
+
+- Load the example project: click “Open” \- “Project” \- “Project from Zip” to upload the project file for this tutorial. You can optionally change the project name in the top left panel, for example to “Piximi Exercise”. As the project loads, you can follow the progress in the logo in the top left corner.
+
+_Loading a project file._
+
+
+
+##### 2. **Check the loaded images and explore the Piximi interface**
+
+_Viewing the project images._
+
+
+
+These 17 images represent Wortmannin treatments at eight different concentrations (expressed in nM), as well as mock treatments (0nM). Note that the DAPI channel (nuclei) is shown in magenta and the GFP channel (FOXO1A) in green.
+
+As you hover over the image, color labels are displayed on the left corner of the images. These annotations are from metadata in the zipped file we just uploaded. In this tutorial, the different colored labels indicate the concentration of Wortmannin, while the numbers represent the number of images in each category.
+
+Optionally, you can annotate images manually by clicking “+ Category”, entering your label, then selecting the relevant images and clicking **“Categorize”** to annotate them. In this tutorial, we’ll skip this step since the labels were already loaded at the beginning. More information can be found in the [Project Viewer](../detail/projectviewer.md) section of the docs.
+
+##### 3. **Segment Cells - Distinguish the cells from the background**
+
+🔴 TO DO
+
+- To start the prediction on all images, click “Select All Images” in the top panel.
+
+- Change the Learning Task to “SEGMENTATION”.
+
+- Click on “+ LOAD MODEL” and a window will pop up, allowing you to choose a pretrained model.
+
+- For today’s exercise, select “Cellpose”. More information about the supported models can be found [here](./segmentation-tutorial.md#2-load-models). Click “Open Segmentation Model” to load your model and select it.
+
+- Finally, click “Predict Model”. You’ll see the prediction progress displayed in the top left corner beneath the Piximi logo.
+
+Please note that the previous steps were performed on your local machine, meaning your images are stored locally. However, Cellpose inference runs in the cloud, which means your images will be uploaded for processing. If your images are highly sensitive, please exercise caution when using cloud-based services.
+
+##### 4. **Visualize segmentation result and fix the segmentation errors**
+
+🔴 TO DO
+
+- Click on the **CELLPOSE_CELLS** tab to check the individual cells that have been segmented. Select some identified objects or whole images, then click "Annotate" in the top bar to view them in the ImageViewer.
+
+_Viewing the project images in the Image Viewer._
+
+
+
+- Optionally, here you can manually refine the segmentation using the annotator tools. The Piximi annotator provides several options to **add**, **subtract**, or **intersect** annotations. Additionally, the **selection tool** allows you to **resize** or **delete** specific annotations. To begin editing, select specific or all images by clicking the checkbox at the top.
+- Optionally, you can adjust channels: Although there are two channels in this experiment, the nuclei signal is duplicated in both the red and green channels. This design is intended to be **color-blind friendly** and to produce a **magenta color** for nuclei. The **green channel** also includes cytoplasmic signals.
+
+Another reason for duplicating the channels is that some models—such as the **Cellpose model** we used today—require a **three-channel** input.
+
+- You can choose to manually segment the cells to generate masks for ground truth data.
+
+##### 5. **Classify cells**
+
+Reason for doing this: We want to classify the 'CELLPOSE_CELLS' based on GFP distribution (on Nuclei, cytoplasm, or no GFP) without manually labeling all of them. To do this, we can use the classification function in Piximi, which allows us to train a classifier using a small subset of labeled data and then automatically classify the remaining cells.
+
+🔴 TO DO
+
+- Go to the **CELLPOSE_CELLS** tab that displays the segmented objects.
+
+_Viewing the cellpose_cells kind._
+
+
+
+- Click on the **Classification** tab on the left panel.
+
+_Action Drawer Classifier Section._
+
+
+
+- Create new categories by clicking **“+ Category”**, adding three categories: “Cytoplasmatic_GFP”, “Nuclear_GFP”, and “No_GFP”.
+
+_Create Category Button._
+
+
+
+- Click on the images that match your criteria. You can select multiple cells by holding **Command (⌘)** on Mac or **Shift** on Linux. Aim to assign **\~20–40 cells per category**. Once selected, click **“Categorize”** to assign the labels to the selected cells.
+
+_Classifying individual cells based on GFP presence and localization._
+
+
+
+##### 6. **Train the Classifier model**
+
+🔴 TO DO
+
+- Click the “Fit Model” icon to open the model hyperparameter settings. For today’s exercise, we’ll adjust a few parameters:
+- Click on “Architecture Settings” and set the Model Architecture to **SimpleCNN**.
+- Update the Input Dimensions to:
+
+ - Input rows: 48
+ - Input cols: 48
+ - Channels: 3 (since our images are in RGB format)
+
+  (You can also try other sizes, such as 64 or 128.)
+
+- In the “Data Partitioning” section, set the Training Percentage to 0.75, which reserves 25% of the labeled data for validation.
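+
+For intuition only, here is a rough Python/Keras analogue of this setup. Piximi itself trains in the browser, so this is an illustrative sketch of a small CNN over 48×48×3 inputs with a 75/25 train/validation split, not Piximi's actual code:
+
+```python
+import tensorflow as tf
+from tensorflow.keras import layers
+
+# A small CNN over 48x48 RGB crops, with three output categories
+model = tf.keras.Sequential([
+    layers.Input(shape=(48, 48, 3)),
+    layers.Conv2D(16, 3, activation="relu"),
+    layers.MaxPooling2D(),
+    layers.Conv2D(32, 3, activation="relu"),
+    layers.MaxPooling2D(),
+    layers.Flatten(),
+    layers.Dense(64, activation="relu"),
+    layers.Dense(3, activation="softmax"),
+])
+model.compile(optimizer="adam",
+              loss="categorical_crossentropy",
+              metrics=["accuracy"])
+
+# validation_split=0.25 mirrors a 0.75 training percentage:
+# model.fit(x, y, epochs=10, validation_split=0.25)
+```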
+
+_Classifier Model Setup._
+
+
+
+- When you click "**Fit Classifier**" in Piximi, two training plots will appear: "**Accuracy vs Epochs**" and "**Loss vs Epochs**". Each plot shows curves for both **training** and **validation** data.
+
+_Training History Plots._
+
+
+
+- In the **accuracy plot**, you’ll see how well the model is learning. Ideally, both training and validation accuracy should increase and stay close.
+- In the **loss plot**, lower values mean better performance. If validation loss starts rising while training loss keeps dropping, the model might be overfitting.
+
+These plots help you understand how the model is learning and whether adjustments are needed.
+
+##### 7. **Evaluate model:**
+
+🔴 TO DO
+
+_Training Run Evaluation._
+
+
+
+- Click "**Predict Model**" to apply the model we just trained. This step will generate predictions on the cells we did not annotate.
+
+_Predict Classifier._
+
+
+
+- You can review the predictions in the CELLPOSE_CELLS tab and delete any wrongly assigned categories.
+- Optionally, you can continue using the labels to refine the ground truth and improve the classifier. This process is part of the **Human-in-the-loop classification**, where you iteratively correct and train the model based on human input.
+- Click "**Evaluate Model**" to evaluate the model we just trained. The confusion matrix and evaluation metrics can be compared to the ground truth.
+- Click "Accept Prediction (Hold)" to assign the predicted labels to all the objects.
+
+_Accept Predictions._
+
+
+
+##### 8. **Measurement**
+
+Once you are satisfied with the classification, we will proceed to measure the objects. The goal of today’s exercise is to determine the minimum concentration of Wortmannin required to block the export of FOXO1A-GFP from the nuclei. To do this, we can measure the total GFP intensity at either the image level or the object level.
+
+🔴 TO DO
+
+- Click “Measurement” in the top right corner.
+
+_Navigate to Measurements._
+
+
+
+- Click the "**+**" next to "**Tables**" and select "**Image**" and click “**Confirm**”. _Note: Preparing the data for the measurement step may take some time to process_.
+
+_Create "**Image**" measurement table._
+
+
+
+- In the "**Splits**" section, click on 'Category' to include all categories in the measurement.
+- Under "**Total**", click on "**Channel 1**" to select the measurement for GFP. You will see the measurement in the “**DATA GRID**” tab. Measurements are presented as either mean or median values, and the full dataset is available upon exporting the `.csv` file.
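+
+If you export this table as a CSV, the dose-response question can also be summarized outside Piximi. A minimal sketch, assuming hypothetical column names `category` (the treatment label) and `intensity-total-channel-1`:
+
+```python
+import pandas as pd
+
+df = pd.read_csv("image_measurements.csv")  # hypothetical export filename
+
+# Mean, median, and spread of total GFP intensity per treatment category
+summary = df.groupby("category")["intensity-total-channel-1"].agg(["mean", "median", "std"])
+print(summary)
+```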
+
+_Calculated Measurements._
+
+
+
+##### 9. **Visualization**
+
+After generating the measurements, you can plot them.
+
+🔴 TO DO
+
+- Click on "**PLOTS**" to visualize the measurements.
+
+_Measurement Plots._
+
+
+
+- Set the plot type to "**Swarm**" and choose a color theme based on your preference.
+- Select "**Y-axis**" as "**intensity-total-channel-1**" and set "**SwarmGroup**" to "**category**"; this will generate a plot showing how GFP intensity varies across the different categories.
+- Selecting "**Show Statistics**" will display the mean, as well as the upper and lower confidence boundaries, on the plot.
+- Optionally, you can experiment with different plot types and axes to see if the data reveals additional insights.
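+
+The same view can be reproduced downstream, for example with seaborn; again a sketch that assumes the same hypothetical CSV export and column names as above:
+
+```python
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+df = pd.read_csv("image_measurements.csv")  # hypothetical export filename
+
+# Swarm plot of total GFP intensity, grouped by treatment category
+sns.swarmplot(data=df, x="category", y="intensity-total-channel-1")
+plt.xticks(rotation=45)
+plt.tight_layout()
+plt.show()
+```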
+
+_Plotted measurements._
+
+
+
+##### 10. **Export results and save the project**
+
+🔴 TO DO
+
+- Click “SAVE” in the top left corner to save the entire project. You'll see the Piximi logo animation as the save progresses.
+
+##### 11. **Supporting Information**
+
+Check out the Piximi paper: [https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2](https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2)
+
+Check out the Piximi documentation: [https://documentation.piximi.app/intro.html](https://documentation.piximi.app/intro.html)
+
+Report bugs/errors or request features at [https://github.com/piximi/documentation/issues](https://github.com/piximi/documentation/issues)
diff --git a/piximi-documentation/pages/tutorial/translocation_tutorial_ES.md b/piximi-documentation/pages/tutorial/translocation_tutorial_ES.md
new file mode 100644
index 0000000..135cc6b
--- /dev/null
+++ b/piximi-documentation/pages/tutorial/translocation_tutorial_ES.md
@@ -0,0 +1,379 @@
+# Tutorial Inicial de Piximi (ESPAÑOL)
+
+> **Segmentación y clasificación sin instalación en el navegador**
+>
+> Beth Cimini, Le Liu, Esteban Miglietta, Paula Llanos, Nodar Gogoberidze
+>
+> Instituto Broad del MIT y Harvard, Cambridge, MA.
+
+### **Información general:**
+
+#### **¿Qué es Piximi?**
+
+Piximi es una herramienta moderna de análisis de imágenes que aprovecha varios métodos de _deep learning_, sin requerir conocimientos de programación. Implementada como una aplicación web en [https://piximi.app/](https://piximi.app/), Piximi no requiere instalación y se puede acceder desde cualquier navegador web moderno. Su arquitectura de cliente único preserva la seguridad de los datos del investigador ejecutando todos los cálculos localmente\*.
+
+Piximi es interoperable con herramientas y flujos de trabajo existentes, ya que admite la importación y exportación de formatos de datos y modelos comunes. La interfaz intuitiva y el fácil acceso a Piximi permiten a los biólogos obtener información sobre las imágenes en tan sólo unos minutos. Piximi tiene como objetivo llevar el análisis de imágenes basado en _deep learning_ a una comunidad más amplia mediante la eliminación de las barreras de entrada.
+
+\* excepto las segmentaciones mediante Cellpose, que se envían a un servidor remoto (con el permiso del usuario).
+
+Funciones básicas: **Anotador, Segmentador, Clasificador, Mediciones.**
+
+#### **Objetivo del ejercicio**
+
+En este ejercicio, se familiarizará con las principales funcionalidades de Piximi de anotación, segmentación, clasificación, medición y visualización, y lo utilizará para analizar un conjunto de imágenes de muestra de un experimento de translocación. El objetivo de este experimento es determinar la **dosis efectiva más baja** de Wortmannin requerida para inducir la localización nuclear de FOXO1A etiquetada con GFP (Figura 1). Segmentará las imágenes utilizando uno de los modelos de _deep learning_ disponibles en Piximi. Comprobará y curará la segmentación y luego entrenará un clasificador de imágenes para clasificar las células individuales según tengan «GFP nuclear», «GFP citoplasmática» o «sin GFP». Por último, realizará mediciones y las representará gráficamente para responder a la pregunta biológica.
+
+#### **Contexto del experimento de muestra**
+
+En este experimento, los investigadores tomaron imágenes de células U2OS de osteosarcoma (cáncer de hueso) fijadas que expresaban una proteína de fusión FOXO1A-GFP y tiñeron con DAPI para marcar los núcleos. FOXO1 es un factor de transcripción que desempeña un papel clave en la regulación de la gluconeogénesis y la glicogenólisis a través de la señalización de insulina. FOXO1A se desplaza dinámicamente entre el citoplasma y el núcleo en respuesta a diversos estímulos. Wortmannin, un inhibidor de PI3K, puede bloquear la exportación nuclear, lo que resulta en la acumulación de FOXO1A en el núcleo.
+
+_Representación esquemática del mecanismo de acción de FOXO1A._
+
+
+
+#### **Materiales necesarios para este ejercicio**
+
+Los materiales necesarios para este ejercicio pueden descargarse de: [PiximiTutorial](./downloads/Piximi_Translocation_Tutorial_RGB.zip). El archivo «Piximi Translocation Tutorial RGB.zip» contiene un proyecto de Piximi que incluye todas las imágenes, ya etiquetadas con el tratamiento correspondiente (concentración de Wortmannin o Control). ¡Descargue este archivo pero **NO lo descomprima**!
+
+#### **Instrucciones para el ejercicio**
+
+Lea los pasos que se indican a continuación y siga las instrucciones donde se indican. Los pasos en los que debe averiguar una solución están marcados con 🔴 PARA HACER.
+
+##### 1. **Cargar el proyecto Piximi**
+
+🔴 PARA HACER
+
+- Inicie Piximi en: [https://piximi.app/](https://piximi.app/)
+
+- Cargue el proyecto de ejemplo: haga clic en «Abrir» \- «Proyecto» \- «Proyecto desde Zip» para cargar el archivo de proyecto de este tutorial. Opcionalmente, puede cambiar el nombre del proyecto en el panel superior izquierdo, por ejemplo a «Ejercicio Piximi». A medida que se carga, puede ver el progreso en el logotipo de la esquina superior izquierda.
+
+_Cargando el archivo de proyecto._
+
+
+
+##### 2. **Compruebe las imágenes cargadas y explore la interfaz Piximi**
+
+_Visualización de las imágenes del proyecto._
+
+
+
+Estas 17 imágenes representan tratamientos con Wortmannin a ocho concentraciones diferentes (expresadas en nM), así como tratamientos con sólo vehículo (0nM). Observe que el canal DAPI (núcleos) se muestra en magenta y que el canal GFP (FOXO1A) se muestra en verde.
+
+Al pasar el cursor por encima de la imagen, aparecen etiquetas de color en la esquina izquierda de las imágenes. Estas anotaciones proceden de los metadatos del archivo comprimido que acabamos de cargar. En este tutorial, las etiquetas de diferentes colores indican la concentración de Wortmannin, mientras que los números representan el número de imágenes en cada categoría.
+
+Opcionalmente, puede anotar las imágenes manualmente haciendo clic en «+ Category», introduciendo su etiqueta y, a continuación, seleccionando las imágenes correspondientes y haciendo clic en **«Categorize»** para anotarlas. En este tutorial, nos saltaremos este paso ya que las etiquetas ya estaban cargadas al principio. Puede encontrar más información en la sección [Visor de proyectos](../detail/projectviewer.md) de los documentos.
+
+##### 3. **Segmentar células - diferenciar las células del _background_**
+
+🔴 PARA HACER
+
+- Para iniciar la predicción en todas las imágenes, haga clic en «Seleccionar todas las imágenes» en el panel superior.
+
+- Cambie la Tarea de Aprendizaje a «SEGMENTATION».
+
+- Haga clic en «+ LOAD MODEL» y aparecerá una ventana que le permitirá elegir un modelo pre-entrenado.
+
+- Para el ejercicio de hoy, seleccione «Cellpose». Puede encontrar más información sobre los modelos admitidos [aquí](https://documentation.piximi.app/segmentation.html). Haga clic en «Open Segmentation Model» para cargar su modelo y seleccionarlo.
+
+- Por último, haga clic en «Predict model». Verá el progreso de la predicción en la esquina superior izquierda debajo del logo de Piximi. Tardará unos minutos en finalizar la segmentación.
+
+Tenga en cuenta que los pasos anteriores se realizaron en su computadora local, lo que significa que sus imágenes se almacenan localmente. Sin embargo, la inferencia de Cellpose se ejecuta en la nube, lo que significa que sus imágenes se cargarán para su procesamiento. Si sus imágenes son altamente sensibles, por favor tenga cuidado cuando utilice servicios basados en la nube.
+
+##### 4. **Visualice el resultado de la segmentación y corrija los errores de segmentación**
+
+🔴 PARA HACER
+
+- Haga clic en la pestaña **CELLPOSE_CELLS** para comprobar las células individuales que se han segmentado. Seleccione algunos objetos identificados o imágenes completas, luego haga clic en "Anotar" en la barra superior para verlos en el Visor de imágenes.
+
+_Visualización de las imágenes del proyecto en el Visor de imágenes._
+
+
+
+- Opcionalmente, aquí puede refinar manualmente la segmentación utilizando las herramientas del anotador. El anotador de Piximi ofrece varias opciones para **añadir**, **restar** o **interseccionar** anotaciones. Además, la **herramienta de selección** le permite **redimensionar** o **eliminar** anotaciones específicas. Para empezar a editar, seleccione imágenes específicas, o todas las imágenes, haciendo clic en la casilla de verificación de la parte superior.
+- Opcionalmente, puede ajustar los canales: Aunque hay dos canales en este experimento, la señal de los núcleos se duplicó en los canales rojo y verde. Este diseño está pensado para ser **color-blind friendly** y para producir un **color magenta** para los núcleos. El **canal verde** también incluye señales citoplasmáticas.
+
+Otra razón para duplicar los canales es que algunos modelos (como **Cellpose** que usamos hoy) requieren que las imágenes de entrada tengan **tres canales**.
+
+- Puede optar por segmentar manualmente las células para generar máscaras para los datos de 'verdad de referencia' (_ground truth_).
+
+##### 5. **Clasificar células**
+
+Razón para hacer esto: Queremos clasificar las “CELLPOSE_CELLS” basándonos en la distribución de la GFP (en Núcleos, citoplasma, o sin GFP) sin etiquetarlas todas y cada una manualmente. Para ello, podemos utilizar la función de clasificación en Piximi, que nos permite entrenar un clasificador utilizando un pequeño subconjunto de datos etiquetados y luego clasificar automáticamente las células restantes.
+
+🔴 PARA HACER
+
+- Vaya a la pestaña **CELLPOSE_CELLS** que muestra los objetos segmentados.
+
+_Visualización del tipo «cellpose_cells»._
+
+
+
+- Haga clic en la pestaña **Clasificación** del panel izquierdo.
+
+_Sección de clasificadores de Action Drawer._
+
+
+
+- Cree nuevas categorías haciendo clic en «+ Category», añadiendo tres categorías: «Cytoplasmatic_GFP», «Nuclear_GFP» y «No_GFP».
+
+_Botón Crear categoría._
+
+
+
+- Haga clic en las imágenes que coincidan con sus criterios. Puede seleccionar varias células manteniendo pulsada la tecla **Command (⌘)** en Mac o **Shift** en Linux. Intente asignar **~20-40 células por categoría**. Una vez seleccionadas, haga clic en **«Categorize»** para asignar las etiquetas a las células seleccionadas.
+
+_Clasificando células individuales en base a la presencia de GFP y su localización._
+
+
+
+##### 6. **Entrenar el modelo clasificador**
+
+🔴 PARA HACER
+
+- Haga clic en el icono «Fit Model» para abrir la configuración de los hiperparámetros del modelo. Para el ejercicio de hoy, ajustaremos algunos parámetros:
+- Haga clic en «Architecture Settings» y ajuste la _Model Architecture_ a **SimpleCNN**.
+- Actualice las dimensiones de entrada a:
+
+ - Filas de entrada: 48
+ - Columnas de entrada: 48
+ - Canales: 3 (ya que nuestras imágenes están en formato RGB)
+
+ (Puede cambiar a otros números como 64, 128)
+
+- En la sección “Particionado de datos”, establezca el porcentaje de entrenamiento (_training percentage_) en 0,75, que reserva el 25% de los datos etiquetados para la validación.
+
+_Configuración del modelo clasificador._
+
+
+
+- Cuando haga clic en "**Fit Classifier**" en Piximi, aparecerán dos gráficos de entrenamiento: "**Precisión vs Épocas**" y "**Pérdida vs Épocas**". Cada gráfico muestra curvas para los datos de **entrenamiento** y **validación**.
+
+_Gráficos del historial de entrenamiento._
+
+
+
+- En el gráfico de **precisión**, verá lo bien que está aprendiendo el modelo. Lo ideal es que tanto la precisión de entrenamiento como la de validación aumenten y se mantengan cercanas.
+- En el gráfico de **pérdida**, los valores más bajos significan un mejor rendimiento. Si la pérdida de validación empieza a aumentar mientras la pérdida de entrenamiento sigue cayendo, el modelo podría estar sobreajustándose.
+
+Estos gráficos le ayudan a comprender cómo está aprendiendo el modelo y si es necesario realizar ajustes.
+
+##### 7. **Evaluar el modelo:**
+
+🔴 PARA HACER
+
+_Evaluación de la ejecución de entrenamiento._
+
+
+
+- Haga clic en **«Predict model»** para aplicar el modelo que acabamos de entrenar. Este paso generará predicciones en las células que no hemos anotado.
+
+_Predecir clasificador._
+
+
+
+- Puede revisar las predicciones en la pestaña CELLPOSE_CELLS y eliminar cualquier categoría mal asignada.
+- Opcionalmente, puede seguir utilizando las etiquetas para refinar la verdad de referencia (_ground truth_) y mejorar el clasificador. Este proceso es parte de la clasificación **Human-in-the-loop**, en la que se corrige y entrena el modelo de forma iterativa a partir de la intervención humana.
+- Haga clic en **«Evaluate model»** para evaluar el modelo que acabamos de entrenar. La matriz de confusión y las métricas de evaluación pueden compararse con la verdad de referencia (_ground truth_).
+- Haga clic en «Accept Prediction (Hold)» (deberás mantener presionado el cursor unos segundos), para asignar las etiquetas predichas a todos los objetos.
+
+_Aceptar predicciones._
+
+
+
+##### 8. **Medición**
+
+Una vez que esté satisfecho con la clasificación, procederemos a medir los objetos. El objetivo del ejercicio de hoy es determinar la concentración mínima de Wortmannin necesaria para bloquear la exportación de FOXO1A-GFP desde los núcleos. Para ello, podemos medir la intensidad total de GFP a nivel de imagen o a nivel de objeto.
+
+🔴 PARA HACER
+
+- Haga clic en «Measurement» en la esquina superior derecha.
+
+_Navegar a Medidas._
+
+
+
+- Haga clic en "**+**" junto a «**Tables**», seleccione «**Image**» y haga clic en «**Confirm**». _Nota: La preparación de los datos para la medición puede tardar un tiempo_.
+
+_Crear tabla de medidas "**Image**"._
+
+
+
+- En la sección «**Splits**», haga clic en «Category» para incluir todas las categorías en la medición.
+- En «Total», haga clic en «Channel 1» para seleccionar la medición de GFP. Verá la medición en la pestaña «DATA GRID». Las mediciones se presentan como valores medios o medianos, y el conjunto de datos completo está disponible al exportar el archivo `.csv`.
+
+_Medidas calculadas._
+
+
+
+##### 9. **Visualización**
+
+Después de generar las mediciones, puede graficarlas.
+
+🔴 PARA HACER
+
+- Haga clic en “**PLOTS**” para visualizar las mediciones.
+
+_Gráficos de medición._
+
+
+
+- Establezca el tipo de gráfico en “**Swarm**” y elija un tema de color según su preferencia.
+- Seleccione “**Y-axis**” como “**intensity-total-channel-1**” y establezca “**SwarmGroup**” como “**category**”; esto generará un gráfico mostrando cómo varía la intensidad de GFP entre las diferentes categorías.
+- Seleccionando “**Show Statistics**” se mostrará la media, así como los límites de confianza superior e inferior, en el gráfico.
+- Opcionalmente, puede experimentar con diferentes tipos de gráficos y ejes para ver si los datos revelan información adicional.
+
+_Graficar los resultados._
+
+
+
+##### 10. **Exportar los resultados y guardar el proyecto**
+
+🔴 PARA HACER
+
+- Haga clic en «SAVE» en la esquina superior izquierda para guardar todo el proyecto. Verá la animación del logo de Piximi a medida que avanza el guardado.
+
+##### 11. **Información adicional**
+
+Consulta el paper de Piximi: [https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2](https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2)
+
+Consulta la documentación de Piximi: [https://documentation.piximi.app/intro.html](https://documentation.piximi.app/intro.html)
+
+Informe de fallos/errores o solicite funcionalidades en [https://github.com/piximi/documentation/issues](https://github.com/piximi/documentation/issues)
diff --git a/piximi-documentation/pages/tutorial/translocation_tutorial_pt_BR.md b/piximi-documentation/pages/tutorial/translocation_tutorial_pt_BR.md
new file mode 100644
index 0000000..969b587
--- /dev/null
+++ b/piximi-documentation/pages/tutorial/translocation_tutorial_pt_BR.md
@@ -0,0 +1,380 @@
+# Tutorial Para Iniciantes do Piximi (Portugues)
+
+> **Segmentação e classificação sem instalação no navegador**
+>
+> Beth Cimini, Le Liu, Esteban Miglietta, Paula Llanos, Nodar Gogoberidze
+>
+> Instituto Broad do MIT e Harvard, Cambridge, MA.
+
+### **Informações básicas:**
+
+#### **O que é Piximi?**
+
+Piximi é uma ferramenta moderna de análise de imagens sem programação que utiliza aprendizado profundo. Implementado como um aplicativo web em [https://piximi.app/](https://piximi.app/), o Piximi não requer instalação e pode ser acessado por qualquer navegador moderno. Sua arquitetura executada somente no cliente preserva a segurança dos dados do pesquisador, executando toda a computação localmente\*.
+
+O Piximi é interoperável com ferramentas e fluxos de trabalho existentes, suportando importação e exportação de dados e formatos de modelos comuns. A interface intuitiva e o fácil acesso ao Piximi permitem que pesquisadores obtenham insights sobre imagens em apenas alguns minutos. O Piximi visa levar a análise de imagens com aprendizado profundo a uma comunidade mais ampla, eliminando barreiras.
+
+\* exceto as segmentações usando Cellpose, que são enviadas para um servidor remoto (com a permissão do usuário).
+
+Funcionalidades principais: **Anotador, Segmentador, Classificador, Medições.**
+
+#### **Objetivo do exercício**
+
+Neste exercício, você se familiarizará com as principais funcionalidades do Piximi: anotação, segmentação, classificação, mensuração e visualização, e o utilizará para analisar um conjunto de imagens de um experimento de translocação. O objetivo deste experimento é determinar a **menor dose efetiva** de Wortmannin necessária para induzir a localização nuclear de FOXO1A marcada com GFP (Figura 1). Você segmentará as imagens usando um dos modelos de aprendizado profundo disponíveis no Piximi, verificará e fará a curadoria da segmentação e, em seguida, treinará um classificador de imagens para classificar as células individuais como tendo "GFP nuclear", "GFP citoplasmática" ou "sem GFP". Por fim, você fará medições e as plotará para responder à pergunta biológica.
+
+#### **Contexto do experimento**
+
+Neste experimento, pesquisadores obtiveram imagens de células U2OS de osteossarcoma (câncer ósseo) fixadas expressando uma proteína de fusão FOXO1A-GFP e coradas com DAPI para marcar os núcleos. FOXO1 é um fator de transcrição que desempenha um papel fundamental na regulação da gliconeogênese e glicogenólise por meio da sinalização da insulina. FOXO1A transita dinamicamente entre o citoplasma e o núcleo em resposta a vários estímulos. A wortmanina, um inibidor da PI3K, pode bloquear a exportação nuclear, resultando no acúmulo de FOXO1A no núcleo.
+
+
+_Representação esquemática do mecanismo de FOXO1A._
+
+
+
+#### **Materiais necessários para este exercício**
+
+Os materiais necessários para este exercício podem ser baixados de: [PiximiTutorial](./downloads/Piximi_Translocation_Tutorial_RGB.zip). O arquivo “Piximi Translocation Tutorial RGB.zip” contém um projeto Piximi, incluindo todas as imagens, já rotuladas com o tratamento correspondente (concentração de Wortmannin ou Controle). Baixe este arquivo, mas **NÃO o descompacte**!
+
+#### **Instruções do exercício**
+
+Leia os passos abaixo e siga as instruções onde indicado. Os passos em que você precisa encontrar uma solução estão marcados com 🔴 PARA FAZER.
+
+##### 1. **Carregue o projeto Piximi**
+
+🔴 PARA FAZER
+
+- Inicie o Piximi acessando: [https://piximi.app/](https://piximi.app/)
+
+- Carregue o projeto de exemplo: clique em “Abrir” \- “Projeto” \- “Projeto do Zip” para carregar o arquivo de projeto deste tutorial. Você também pode alterar o nome do projeto no painel superior esquerdo, como “Exercício Piximi”. Conforme ele é carregado, você pode ver a progressão no logotipo no canto superior esquerdo.
+
+_Carregando um arquivo de projeto._
+
+
+
+##### 2. **Verifique as imagens carregadas e explore a interface do Piximi**
+
+_Explorando as imagens e rótulos._
+
+
+
+Estas 17 imagens representam tratamentos com Wortmannin em oito concentrações diferentes (expressas em nM), bem como tratamentos controles (0 nM). Observe que o canal DAPI (núcleos) é mostrado em magenta e que o canal GFP (FOXO1A) é mostrado em verde.
+
+Ao passar o mouse sobre a imagem, rótulos coloridos são exibidos no canto esquerdo das imagens. Essas anotações são dos metadados do arquivo compactado que acabamos de enviar. Neste tutorial, os diferentes rótulos coloridos indicam a concentração de Wortmannin, enquanto os números representam o número de imagens em cada categoria.
+
+Opcionalmente, você pode anotar as imagens manualmente clicando em "+ Categoria", inserindo seu rótulo e, em seguida, selecionando as imagens desejadas e clicando em **"Categorizar"** para anotá-las. Neste tutorial, pularemos esta etapa, pois os rótulos já foram carregados no início. Mais informações podem ser encontradas na seção [Project Viewer](../detail/projectviewer.md) dos documentos.
+
+##### 3. **Segmentar Células - distinguir as células do fundo**
+
+🔴 PARA FAZER
+
+- Para iniciar a previsão em todas as imagens, clique em “Selecionar Todas as Imagens” no painel superior.
+
+- Altere a Tarefa de Aprendizagem para “SEGMENTAÇÃO”.
+
+- Clique em “+ CARREGAR MODELO” e uma janela será exibida, permitindo que você escolha um modelo pré-treinado.
+
+- Para o exercício de hoje, selecione “Cellpose”. Mais informações sobre os modelos suportados podem ser encontradas [aqui](https://documentation.piximi.app/segmentation.html). Clique em “Abrir Modelo de Segmentação” para carregar seu modelo e selecioná-lo.
+
+- Por fim, clique em “Prever Modelo”. Você verá o progresso da previsão exibido no canto superior esquerdo, abaixo do logotipo da Piximi. A segmentação levará alguns minutos para ser concluída.
+
+Observe que as etapas anteriores foram executadas em sua máquina local, o que significa que suas imagens estão armazenadas localmente. No entanto, a inferência do Cellpose é executada na nuvem, o que significa que suas imagens serão enviadas para processamento. Se suas imagens forem altamente sensíveis, tenha cuidado ao usar serviços baseados em nuvem.
+
+##### 4. **Visualize o resultado da segmentação e corrija os erros de segmentação**
+
+🔴 PARA FAZER
+
+- Clique na aba **CELLPOSE_CELLS** para verificar as células individuais que foram segmentadas.
+ Selecione alguns objetos identificados ou imagens inteiras e clique em "Anotar" na barra superior para visualizá-los no ImageViewer.
+
+_Visualizando as imagens do projeto no Visualizador de Imagens._
+
+
+
+- Opcionalmente, aqui você pode refinar manualmente a segmentação usando as ferramentas do anotador. O anotador Piximi oferece diversas opções para **adicionar**, **subtrair** ou **interseccionar** anotações. Além disso, a **ferramenta de seleção** permite **redimensionar** ou **excluir** anotações específicas. Para começar a editar, selecione imagens específicas ou todas clicando na caixa de seleção na parte superior.
+- Opcionalmente, você pode ajustar os canais: embora existam dois canais neste experimento, o sinal dos núcleos é duplicado nos canais vermelho e verde. Esse design busca ser **compatível com daltonismo** e produzir uma **cor magenta** para os núcleos. O **canal verde** também inclui sinais citoplasmáticos.
+
+Outro motivo para duplicar os canais é que alguns modelos — como o **modelo Cellpose** que usamos hoje — exigem uma entrada de **três canais**.
+
+- Você pode optar por segmentar manualmente as células para gerar máscaras como dados de referência (_ground truth_).
+
+##### 5. **Classificar células**
+
+Motivo para isso: Queremos classificar as 'CELLPOSE_CELLS' com base na distribuição de GFP (em núcleos, citoplasma ou sem GFP) sem rotular todas elas manualmente. Para isso, podemos usar a função de classificação do Piximi, que nos permite treinar um classificador usando um pequeno subconjunto de dados rotulados e, em seguida, classificar automaticamente as células restantes.
+
+🔴 PARA FAZER
+
+- Acesse a aba **CELLPOSE_CELLS** que exibe os objetos segmentados.
+
+_Visualizando o tipo "cellpose_cells"._
+
+
+
+- Clique na aba **Classificação** no painel esquerdo.
+
+_Seção Classificador do Action Drawer._
+
+
+
+- Crie novas categorias clicando em **“+ Categoria”**. Adicione as três categorias “Cytoplasmatic_GFP”, “Nuclear_GFP” e “No_GFP”.
+
+_Botão Criar Categoria._
+
+
+
+- Clique nas imagens que correspondem aos seus critérios. Você pode selecionar várias células pressionando **Command (⌘)** no Mac ou **Shift** no Linux. Tente atribuir **\~20–40 células por categoria**. Após selecionar, clique em **“Categorizar”** para atribuir os rótulos às células selecionadas.
+
+_Classificação de células individuais com base na presença e localização de GFP._
+
+
+
+##### 6. **Treine o modelo do Classificador**
+
+🔴 PARA FAZER
+
+- Clique no ícone "Ajustar Modelo" para abrir as configurações de hiperparâmetros do modelo. Para o exercício de hoje, ajustaremos alguns parâmetros:
+- Clique em “Configurações de arquitetura” e defina a arquitetura do modelo como **SimpleCNN**.
+- Atualize as dimensões de entrada para:
+
+ - Linhas de entrada: 48
+ - Colunas de entrada: 48
+ - Canais: 3 (já que nossas imagens estão no formato RGB)
+
+ (Você pode mudar para outros números, como 64, 128)
+
+- Na seção “Particionamento de Dados”, defina a Porcentagem de Treinamento como 0,75, o que reserva 25% dos dados rotulados para validação.
+
+_Configuração do modelo classificador._
+
+
+
+- Ao clicar em "**Ajustar Classificador**" no Piximi, dois gráficos de treinamento aparecerão: "**Precisão vs. Épocas**" e "**Perda vs. Épocas**". Cada gráfico mostra curvas para os dados de **treinamento** e **validação**.
+
+_Gráficos do histórico de treinamento._
+
+
+
+- No **gráfico de precisão**, você verá o quão bem o modelo está aprendendo. Idealmente, a precisão tanto do treinamento quanto da validação deve aumentar e permanecer próxima.
+- No **gráfico de perdas**, valores menores significam melhor desempenho. Se a perda de validação começar a aumentar enquanto a perda de treinamento continua caindo, o modelo pode estar com sobreajuste.
+
+Esses gráficos ajudam a entender como o modelo está aprendendo e se ajustes são necessários.
+
+##### 7. **Avaliar modelo:**
+
+🔴 PARA FAZER
+
+_Avaliação da execução de treinamento._
+
+
+
+- Clique em "**Prever Modelo**" para aplicar o modelo que acabamos de treinar. Esta etapa gerará previsões nas células que não anotamos.
+
+_Prever classificador._
+
+
+
+- Você pode revisar as previsões na aba CELLPOSE_CELLS e excluir quaisquer categorias atribuídas incorretamente.
+- Opcionalmente, você pode continuar usando os rótulos para refinar a verdade de referência (*ground truth*) e aprimorar o classificador. Esse processo faz parte da classificação ***human-in-the-loop***, na qual você corrige e retreina o modelo iterativamente com base na revisão humana.
+- Clique em "**Avaliar Modelo**" para avaliar o modelo que acabamos de treinar. A matriz de confusão e as métricas de avaliação podem ser comparadas com a verdade de referência, como ilustra o esboço abaixo.
+- Clique em "**Aceitar Previsão (Hold)**", mantendo o botão pressionado por alguns segundos, para atribuir os rótulos previstos a todos os objetos.
+
+
+_Aceitar previsões._
+
+##### 8. **Medição**
+
+Assim que estiver satisfeito com a classificação, prosseguiremos com a medição dos objetos. O objetivo do exercício de hoje é determinar a concentração mínima de Wortmannin necessária para bloquear a exportação de FOXO1A-GFP dos núcleos. Para isso, podemos medir a intensidade total de GFP no nível da imagem ou no nível de cada objeto.
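+
+Para deixar a medição concreta: a intensidade total por objeto é a soma dos pixels de GFP dentro de cada rótulo da máscara de segmentação. Um esboço com `scipy.ndimage` sobre arrays hipotéticos:
+
+```python
+import numpy as np
+from scipy import ndimage
+
+# gfp: canal GFP (2D); mascara: máscara rotulada (0 = fundo,
+# 1..N = células segmentadas). Ambos hipotéticos.
+gfp = np.random.rand(128, 128).astype(np.float32)
+mascara = np.zeros((128, 128), dtype=np.int32)
+mascara[10:40, 10:40] = 1
+mascara[60:90, 60:90] = 2
+
+ids = np.arange(1, mascara.max() + 1)
+# Soma dos pixels de GFP dentro de cada objeto rotulado.
+intensidade_total = ndimage.sum(gfp, labels=mascara, index=ids)
+print(dict(zip(ids.tolist(), intensidade_total.tolist())))
+```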
+
+🔴 PARA FAZER
+
+- Clique em “Medição” no canto superior direito.
+
+
+_Navegar para Medidas._
+
+- Clique em "**+**" ao lado de «**Tabelas**» e selecione «**Imagem**» e clique em «**Confirmar**». _Observação: a preparação dos dados para a etapa de medição pode levar algum tempo para ser processada._
+
+
+_Criar tabela de medidas "**Image**"._
+
+- Clique em "Categoria" para incluir todas as categorias na medição.
+- Em "Total", clique em "Canal 1" para selecionar a medição para GFP. Você verá a medição na aba "GRADE DE DADOS". As medições são apresentadas como valores médios ou medianos, e o conjunto de dados completo está disponível ao exportar o arquivo .csv.
+
+
+_Medidas calculadas._
+
+##### 9. **Visualização**
+
+Após gerar as medições, você pode plotá-las.
+
+🔴 PARA FAZER
+
+- Clique em "**PLOTS**" para visualizar as medições.
+
+
+_Gráficos de medição._
+
+- Defina o tipo de gráfico como "**Swarm**" e escolha um tema de cores de acordo com sua preferência.
+- Selecione "**Y-axis**" como "**intensity-total-channel-1**" e defina "**SwarmGroup**" como "**category**"; isso gerará uma curva mostrando como a intensidade da GFP varia entre as diferentes categorias.
+- Selecionar "**Show Statistics**" exibirá a média, bem como os limites de confiança superior e inferior, no gráfico.
+- Opcionalmente, você pode experimentar diferentes tipos de gráfico e eixos para ver se os dados revelam insights adicionais.
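+
+O mesmo tipo de gráfico pode ser reproduzido fora do Piximi com `seaborn`, partindo do .csv exportado na etapa anterior (nomes de arquivo e de colunas hipotéticos):
+
+```python
+import matplotlib.pyplot as plt
+import pandas as pd
+import seaborn as sns
+
+df = pd.read_csv("medicoes_piximi.csv")  # exportado na etapa de medição
+ax = sns.swarmplot(data=df, x="category", y="intensity-total-channel-1", size=3)
+ax.set_xlabel("Categoria")
+ax.set_ylabel("Intensidade total de GFP (canal 1)")
+plt.tight_layout()
+plt.show()
+```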
+
+
+_Resultados do gráfico._
+
+##### 10. **Exportar resultados e salvar o projeto**
+
+🔴 PARA FAZER
+
+- Clique em "SALVAR" no canto superior esquerdo para salvar o projeto inteiro. Você verá a animação do logotipo do Piximi conforme o salvamento avança
.
+
+##### 11. **Informações de apoio**
+
+Confira o artigo do Piximi: [https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2](https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2)
+
+Confira a documentação do Piximi: [https://documentation.piximi.app/intro.html](https://documentation.piximi.app/intro.html)
+
+Relate bugs/erros ou solicite recursos em: [https://github.com/piximi/documentation/issues](https://github.com/piximi/documentation/issues)
diff --git a/piximi-documentation/quick-start.md b/piximi-documentation/quick-start.md
new file mode 100644
index 0000000..7725e18
--- /dev/null
+++ b/piximi-documentation/quick-start.md
@@ -0,0 +1,6 @@
+# Quick Start
+
+```{admonition} Under Construction
+:class: note
+For now, see one of our tutorials, such as the [beginner tutorial](./pages/tutorial/translocation_tutorial.md), to learn how to use Piximi.
+```
diff --git a/piximi-documentation/tmp.md b/piximi-documentation/tmp.md
new file mode 100644
index 0000000..2c7b9fd
--- /dev/null
+++ b/piximi-documentation/tmp.md
@@ -0,0 +1,17 @@
+## The Piximi Annotator
+
+
+
+The annotator within Piximi allows you to intuitively create annotations on your image of choice using a variety of tools, ranging from manual pen annotations to more automatic methods such as quick annotation. The Piximi annotator also works with **multichannel** and **multiplane** images, both of which can be easily selected to make sure that annotations are placed where they need to be. In future releases, we aim to also include z-plane interpolation to make annotating in 3D even easier.
+
+## The Piximi Classifier
+
+By using deep learning, Piximi can classify images of a variety of subject matter, such as bacteria, cultured cells, insects and more. Deep learning is applied to a small number of images that the user has categorized in Piximi, which gives the computer a starting point for understanding what a particular category of image looks like. For example, the user can teach the computer what cells in the G1, S, G2 or M phases of the cell cycle look like. These user-made categorizations are then examined by the deep learning model within Piximi through a process known as training, during which the model finds patterns in the data that link the input (an image added by the user) to an output (the category, or class, defined by the user). The trained deep learning model can then predict which class uncategorized images belong to. In the aforementioned example, this would determine which stage of the cell cycle a particular cell is in without relying on user input.
+
+Ultimately, Piximi offers a highly customizable way to classify large image sets across a range of subject matter by learning from annotations made by the user.
+
+## Piximi Next Steps
+
+Piximi is a work in progress and currently features an image annotator and deep learning-based classification of images. The ultimate aim of Piximi is to provide users with an intuitive application for the annotation, segmentation, classification and measurement of information present in images. In this phase of Piximi, we have released the annotator and classifier as these are important first steps in preparing to add the segmentation and measurement functionalities.
+
+We are developing Piximi in the open. If there are features you would like to see in Piximi, please create a discussion in [our Piximi Github repo](https://github.com/piximi/piximi/discussions).
\ No newline at end of file
diff --git a/piximi-documentation/translocation_tutorial.md b/piximi-documentation/translocation_tutorial.md
deleted file mode 100644
index 32e8656..0000000
--- a/piximi-documentation/translocation_tutorial.md
+++ /dev/null
@@ -1,231 +0,0 @@
-# Piximi beginner tutorial (ENGLISH)
-
-## Installation-free segmentation and classification in the browser
-
-Beth Cimini, Le Liu, Esteban Miglietta, Paula Llanos, Nodar Gogoberidze
-
-Broad Institute of MIT and Harvard, Cambridge, MA.
-
-### **Background information:**
-
-#### **What is Piximi?**
-
-Piximi is a modern, no-programming image analysis tool leveraging deep learning. Implemented as a web application at [https://piximi.app/](https://piximi.app/), Piximi requires no installation and can be accessed by any modern web browser. Its client-only architecture preserves the security of researcher data by running all computation locally\*.
-
-Piximi is interoperable with existing tools and workflows by supporting import and export of common data and model formats. The intuitive interface and easy access to Piximi allows biological researchers to obtain insights into images within just a few minutes. Piximi aims to bring deep learning-powered image analysis to a broader community by eliminating barriers to entry.
-
-\* except for the segmentations using Cellpose, which are sent to a remote server (with the permission of the user).
-
-Core functionalities: **Annotator, Segmentor, Classifier, Measurments.**
-
-#### **Goal of the exercise**
-
-In this exercise, you will familiarize yourself with Piximi’s main functionalities of annotation, segmentation, classification, measurement and visualization and use it to analyze a sample image dataset from a translocation experiment. The goal of this experiment is to determine the **lowest effective dose** of Wortmannin required to induce GFP-tagged FOXO1A nuclear localization (Figure 21). You will segment the images using one of the deep learning models available in Piximi, check and curate the segmentation, then train an image classifier to classify the individual cells as having “nuclear-GFP”, “cytoplasmic-GFP” or “no-GFP”. Finally, you will make measurements and plot them to answer the biological question.
-
-
-#### **Context of the sample experiment**
-
-In this experiment, researchers imaged fixed U2OS osteosarcoma (bone cancer) cells expressing a FOXO1A-GFP fusion protein and stained DAPI to label the nuclei. FOXO1 is a transcription factor that plays a key role in regulating gluconeogenesis and glycogenolysis through insulin signaling. FOXO1A dynamically shuttles between the cytoplasm and nucleus in response to various stimuli. Wortmannin, a PI3K inhibitor, can block nuclear export, resulting in the accumulation of FOXO1A in the nucleus.
-
-
-```{figure} ./img/tutorial_images/Figure1.png
-:width: 300
-:align: center
-
-Schematic representation of FOXO1A mechanism
-```
-
-
-#### **Materials necessary for this exercise**
-
-The materials needed in this exercise can be downloaded from: [PiximiTutorial](./downloads/Piximi_Translocation_Tutorial_RGB.zip). The “Piximi Translocation Tutorial RGB.zip” file contains a Piximi project, including all the images, already labeled with the corresponding treatment (Wortmannin concentration or Control). Download this file but **do NOT unzip it**!
-
-#### **Exercise instructions**
-
-Read through the steps below and follow instructions where stated. Steps where you must figure out a solution are marked with 🔴 TO DO.
-
-##### 1. **Load the Piximi project**
-
-🔴 TO DO
-
-* Start Piximi by going to:[https://piximi.app/](https://piximi.app/)
-
-* Load the example project: Click “Open” \- “Project” \- “Project from Zip”, as shown in figure 22 to upload a project file for this tutorial from Zip, and you can optionally change the project name in the top left panel, such as “Piximi Exercise”. As it is loaded, you can see the progression in the top left corner logo.
-
-```{figure} ./img/tutorial_images/Figure2.png
-:width: 600
-:align: center
-
-Loading a project file.
-```
-
-##### 2. **Check the loaded images and explore the Piximi interface**
-
-These 17 images represent Wortmannin treatments at eight different concentrations (expressed in nM), as well as mock treatments (0nM). Note the DAPI channel (Nuclei) is shown in magenta and that the GFP channel (FOXOA1) is shown in green.
-
-As you hover over the image, color labels are displayed on the left corner of the images. These annotations are from metadata in the zipped file we just uploaded. In this tutorial, the different colored labels indicate the concentration of Wortmannin, while the numbers represent the number of images in each category.
-
-Optionally, you can annotate the images manually by clicking “+ Category”, entering your label, and then selecting the image by clicking the images annotating the selected images by clicking **“Categorize”**. In this tutorial, we’ll skip this step since the labels were already uploaded at the beginning.
-
-```{figure} ./img/tutorial_images/Figure3.png
-:width: 600
-:align: center
-
-Exploring the images and labels.
-```
-
-##### 3. **Segment Cells - find out the cells from the background**
-
-
- 🔴 TO DO
-
-* To start the prediction on all images, click “Select All Images” in the top panel as shown in Figure 23.
-* Change the Learning Task to “SEGMENTATION” (Figure 24, Arrow 1).
-
-* Click on “+ LOAD MODEL” (Arrow 2) and the window will pop up, allowing you to choose a pretrained model (Arrow 3). For today’s exercise, select “Cellpose” (Arrow 4). More information about the model supported can be found [here](https://documentation.piximi.app/segmentation.html).
-* Click “Open Segmentation Model” (Arrow 5) to load your model and select it. Finally, click “Predict Model” (Arrow 5). You’ll see the prediction progress displayed in the top left corner beneath the Piximi logo.
-* It will take a few minutes to finish the segmentation.
-
-
-```{figure} ./img/tutorial_images/Figure4.png
-:width: 600
-:align: center
-
-Loading a segmentation model.
-```
-
-Please note that the previous steps were performed on your local machine, meaning your images are stored locally. However, Cellpose inference runs in the cloud, which means your images will be uploaded for processing. If your images are highly sensitive, please exercise caution when using cloud-based services.
-
-##### 4. **Visualize segmentation result and fix the segmentation errors**
-
-🔴 TO DO
-
-* Click on the **CELLPOSE_CELLS** tab to check the individual cells that have been segmented. Click on the “IMAGE” tab and then “Annotate”, you can check the segmentation on the whole image.
-
-```{figure} ./img/tutorial_images/Figure5.png
-:width: 600
-:align: center
-
-Piximi's annotator tool.
-```
-
-* Optionally, here you can manually refine the segmentation using the annotator tools. The Piximi annotator provides several options to **add**, **subtract**, or **intersect** annotations. Additionally, the **selection tool** allows you to **resize** or **delete** specific annotations. To begin editing, select specific or all images by clicking the checkbox at the top.
-* Optionally, you can adjust channels: Although there are two channels in this experiment, the nuclei signal is duplicated in both the red and green channels. This design is intended to be **color-blind friendly** and to produce a **magenta color** for nuclei. The **green channel** also includes cytoplasmic signals.
-
-Another reason for duplicating the channels is that some models—such as the **Cellpose model** we used today—require a **three-channel** input.
-
-* You can choose to manually segment the cells to generate masks for ground truth data.
-
-##### **Classify cells**
-
-Reason for doing this: We want to classify the 'CELLPOSE\_CELLS' based on GFP distribution (on Nuclei, cytoplasm, or no GFP) without manually labeling all of them. To do this, we can use the classification function in Piximi, which allows us to train a classifier using a small subset of labeled data and then automatically classify the remaining cells.
-
- 🔴 TO DO
-
-* Go to the **CELLPOSE_CELLS** tab that displays the segmented objects (arrow 1, figure 26)
-* Click on the **Classification** tab on the left panel (arrow 2, figure 26).
-* Create new categories by clicking **“+ Category”**. Adding “Cytoplasmatic_GFP”, “Nuclear_GFP”, “No_GFP” three categories (Arrow 3, Figure 26).
-* Click on the images that match your criteria. You can select multiple cells by holding **Command (⌘)** on Mac or **Shift** on Linux. Aim to assign **\~20–40 cells per category**. Once selected, click **“Categorize”** to assign the labels to the selected cells.
-
-```{figure} ./img/tutorial_images/Figure6.png
-:width: 600
-:align: center
-
-Classifying individual cells based on GFP presence and localization.
-```
-
-##### 6. **Train the Classifier model**
-
- 🔴 TO DO
-
-* Click the “Fit Model” icon to open the model hyperparameter settings. For today’s exercise, we’ll adjust a few parameters:
-* Click on “Architecture Settings” and set the Model Architecture to **SimpleCNN**.
-* Update the Input Dimensions to:
- - Input rows: 48
- - Input cols: 48
- - Channels: 3 (since our images are in RGB format)
-
- (You can change to other numbers such as 64, 128)
-
-```{figure} ./img/tutorial_images/Figure7.png
-:width: 600
-:align: center
-
-Classifier model setup.
-```
-
-* Click on the “Dataset Setting” tab and set the Training Percentage to 0.75, which reserves 25% of the labeled data for validation.
-* When you click **"Fit Classifier"** in Piximi, two training plots will appear “**Accuracy vs Epochs”** and **“Loss vs Epochs”**. Each plot shows curves for both **training** and **validation** data.
-* In the **accuracy plot**, you’ll see how well the model is learning. Ideally, both training and validation accuracy should increase and stay close.
-* In the **loss plot**, lower values mean better performance. If validation loss starts rising while training loss keeps dropping. g, the model might be overfitting.
-
-These plots help you understand how the model is learning and whether adjustments are needed.
-
-##### 7. **Evaluate model:**
-
- 🔴 TO DO
-
-```{figure} ./img/tutorial_images/Figure8.png
-:width: 400
-:align: center
-
-Classifier training and validation.
-```
-
-* Click **“Predict Model” (figure 28, arrow 1)** to apply the model we just trained. This step will generate predictions on the cells we did not annotate.
-* You can review the predictions in the CELLPOSE_CELLS tab and delete any wrongly assigned categories.
-* Optionally, you can continue using the labels to refine the ground truth and improve the classifier. This process is part of the **Human-in-the-loop classification**, where you iteratively correct and train the model based on human input.
-* Click **“Evaluate Model” (figure 28, arrow 2)** to evaluate the model we just trained. The confusion metrics and evaluation metrics can be compared to the ground truth.
-* Click "Accept Prediction (Hold)”, to assign the predicted labels to all the objects.
-
-##### 8. **Measurement**
-
-Once you are satisfied with the classification, we will proceed to measure the objects. The goal of today’s exercise is to determine the minimum concentration of Wortmannin required to block the export of FOXO1A-GFP from the nuclei. To do this, we can measure the total GFP intensity at either the image level or the object level.
-
- 🔴 TO DO
-
-* Click “Measurement” in the top right corner.
-* Click Tables (Arrow 1) and select Image and click “Confirm” (Arrow 2).
-* Choose "MEASUREMENT" in the left panel, note the measurement step may take some time to process.
-* Click on 'Category' to include all categories in the measurement.
-* "Under 'Total', click on 'Channel 1' (Arrow 3) to select the measurement for GFP. You will see the measurement in the “DATA GRID” tab. Measurements are presented as either mean or median values, and the full dataset is available upon exporting the .csv file.
-
-```{figure} ./img/tutorial_images/Figure9.png
-:width: 600
-:align: center
-
-Add measurements.
-```
-
-##### 9. **Visualization**
-
-After generating the measurements, you can plot the measurements.
-
- 🔴 TO DO
-
-* Click on 'PLOTS' (Figure 30, Arrow 1) to visualize the measurements.
-* Set the plot type to 'Swarm' and choose a color theme based on your preference.
-* Select 'Y-axis' as 'intensity-total-channel-1' and set 'SwarmGroup' to 'category'; this will generate a curve showing how GFP intensity varies across different categories (Figure 30, Arrow 2).
-* Selecting 'Show Statistics' will display the mean, as well as the upper and lower confidence boundaries, on the plot.
-* Optionally, you can experiment with different plot types and axes to see if the data reveals additional insights.
-
-```{figure} ./img/tutorial_images/Figure10.png
-:width: 600
-:align: center
-
-Plot results.
-```
-
-##### 10. **Export results and save the project**
-
- 🔴 TO DO
-
-* Click “SAVE” in the top left corner to save the entire project. You'll see the Piximi logo animation as the save progresses.
-
-##### 11. **Supporting Information**
-
-Check out the Piximi paper: [https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2](https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2)
-
-Check out the Piximi documentation:[Piximi documentation](https://documentation.piximi.app/intro.html):[https://documentation.piximi.app/intro.html](https://documentation.piximi.app/intro.html)
-
-Report bugs/errors or request features [https://github.com/piximi/documentation/issues](https://github.com/piximi/documentation/issues)
diff --git a/piximi-documentation/translocation_tutorial_ES.md b/piximi-documentation/translocation_tutorial_ES.md
deleted file mode 100644
index a44baf9..0000000
--- a/piximi-documentation/translocation_tutorial_ES.md
+++ /dev/null
@@ -1,227 +0,0 @@
-# Tutorial inicial de Piximi (ESPAÑOL)
-
-## Segmentación y clasificación sin instalación en el navegador
-
-Beth Cimini, Le Liu, Esteban Miglietta, Paula Llanos, Nodar Gogoberidze
-
-Instituto Broad del MIT y Harvard, Cambridge, MA.
-
-### **Información general:**
-
-#### **¿Qué es Piximi?**
-
-Piximi es una herramienta moderna de análisis de imágenes tomando ventaja de varios métodos de *deep learning*, sin requerir conocimientos de programación. Implementado como una aplicación web en [https://piximi.app/](https://piximi.app/), Piximi no requiere instalación y se puede acceder desde cualquier navegador web moderno. Su arquitectura de cliente único preserva la seguridad de los datos del investigador ejecutando todos los cálculos localmente\*.
-
-Piximi es interoperable con herramientas y flujos de trabajo existentes, ya que admite la importación y exportación formatos de datos y modelos comunes. La interfaz intuitiva y el fácil acceso a Piximi permiten a los biólogos obtener información sobre las imágenes en tan sólo unos minutos. Piximi tiene como objetivo llevar el análisis de imágenes basado en *deep learning* a una comunidad más amplia mediante la eliminación de las barreras de entrada.
-
-\* excepto las segmentaciones mediante Cellpose, que se envían a un servidor remoto (con el permiso del usuario).
-
-Funciones básicas: **Anotador, Segmentador, Clasificador, Mediciones.**
-
-#### **Objetivo del ejercicio**
-
-En este ejercicio, se familiarizará con las principales funcionalidades de Piximi de anotación, segmentación, clasificación, medición y visualización y lo utilizará para analizar un conjunto de imágenes de muestra de un experimento de translocación. El objetivo de este experimento es determinar la **dosis efectiva más baja** de Wortmannin requerida para inducir la localización nuclear de FOXO1A etiquetada con GFP (Figura 31). Segmentará las imágenes utilizando uno de los modelos de *deep learning* disponibles en Piximi. Comprobará y curará la segmentación y luego entrenará un clasificador de imágenes para clasificar las células individuales como teniendo «GFP nuclear», «GFP citoplasmática» o «sin GFP». Por último, realizará mediciones y las representará gráficamente para responder a la pregunta biológica.
-
-#### **Contexto del experimento de muestra**
-
-En este experimento, los investigadores tomaron imágenes de células U2OS de osteosarcoma (cáncer de hueso) fijadas que expresaban una proteína de fusión FOXO1A-GFP y tiñeron con DAPI para marcar los núcleos. FOXO1 es un factor de transcripción que desempeña un papel clave en la regulación de la gluconeogénesis y la glicogenólisis a través de la señalización de insulina. FOXO1A se desplaza dinámicamente entre el citoplasma y el núcleo en respuesta a diversos estímulos. Wortmannin, un inhibidor de PI3K, puede bloquear la exportación nuclear, lo que resulta en la acumulación de FOXO1A en el núcleo.
-
-```{figure} ./img/tutorial_images/Figure1.png
-:width: 300
-:align: center
-
-Representación esquemática del mecanismo de acción de FOXO1A.
-```
-
-#### **Materiales necesarios para este ejercicio**
-
-Los materiales necesarios para este ejercicio pueden descargarse de: [PiximiTutorial](./downloads/Piximi_Translocation_Tutorial_RGB.zip). El archivo «Piximi Translocation Tutorial RGB.zip» contiene un proyecto de Piximi que incluye todas las imágenes, ya etiquetadas con el tratamiento correspondiente (concentración de Wortmannin o Control). ¡Descargue este archivo pero **NO lo descomprima**!
-
-#### **Instrucciones para el ejercicio**
-
-Lea los pasos que se indican a continuación y siga las instrucciones donde se indican. Los pasos en los que debe averiguar una solución están marcados con 🔴 PARA HACER.
-
-##### 1. **Cargar el proyecto Piximi**
-
-🔴 PARA HACER
-
-* Inicia Piximi en:[https://piximi.app/](https://piximi.app/)
-
-* Cargar el proyecto de ejemplo: Haga clic en «Abrir» \- “Proyecto” \- «Proyecto desde Zip», como se muestra en la figura 32 para cargar un archivo de proyecto para este tutorial desde Zip, y opcionalmente puede cambiar el nombre del proyecto en el panel superior izquierdo, como «Ejercicio Piximi». A medida que se carga, se puede ver la progresión en el logotipo de la esquina superior izquierda.
-
-
-```{figure} ./img/tutorial_images/Figure2.png
-:width: 600
-:align: center
-
-Cargando el archivo de proyecto.
-```
-
-##### 2. **Compruebe las imágenes cargadas y explore la interfaz Piximi**
-
-Estas 17 imágenes representan tratamientos con Wortmannin a ocho concentraciones diferentes (expresadas en nM), así como tratamientos con sólo vehículo (0nM). Observe que el canal DAPI (Núcleos) se muestra en magenta y que el canal GFP (FOXOA1) se muestra en verde.
-
-Al pasar el cursor por encima de la imagen, aparecen etiquetas de color en la esquina izquierda de las imágenes. Estas anotaciones proceden de los metadatos del archivo comprimido que acabamos de cargar. En este tutorial, las etiquetas de diferentes colores indican la concentración de Wortmannin, mientras que los números representan el número de imágenes en cada categoría.
-
-Opcionalmente, puede anotar las imágenes manualmente haciendo clic en «+ Category», introduciendo su etiqueta, y luego seleccionando la imagen haciendo clic en las imágenes anotando las imágenes seleccionadas haciendo clic en **«Categorize»**. En este tutorial, nos saltaremos este paso ya que las etiquetas ya estaban cargadas al principio.
-
-```{figure} ./img/tutorial_images/Figure3.png
-:width: 600
-:align: center
-
-Explorando las imágenes y etiquetas.
-```
-
-##### 3. **Segmentar Células - diferenciar las células del *background***.
-
- 🔴 PARA HACER
-
-* Para iniciar la predicción en todas las imágenes, haga clic en «Seleccionar todas las imágenes» en el panel superior, como se muestra en la Figura 33.
-* Cambie la Tarea de Aprendizaje a «SEGMENTATION» (Figura 34, Flecha 1).
-* Haga clic en «+ LOAD MODEL» (Flecha 2) y aparecerá una ventana que le permitirá elegir un modelo pre-entrenado (Flecha 3). Para el ejercicio de hoy, seleccione «Cellpose» (Flecha 4). Puede encontrar más información sobre el modelo admitido [aquí](https://documentation.piximi.app/segmentation.html).
-* Haga clic en «Open Segmentation Model» (Flecha 5) para cargar su modelo y seleccionarlo. Por último, haga clic en «Predict model» (Flecha 5). Verá el progreso de la predicción en la esquina superior izquierda debajo del logo de Piximi.
-* Tardará unos minutos en finalizar la segmentación.
-
-
-```{figure} ./img/tutorial_images/Figure4.png
-:width: 600
-:align: center
-
-Cargando un modelo de segmentación.
-```
-
-Tenga en cuenta que los pasos anteriores se realizaron en su computadora local, lo que significa que sus imágenes se almacenan localmente. Sin embargo, la inferencia de Cellpose se ejecuta en la nube, lo que significa que sus imágenes se cargarán para su procesamiento. Si sus imágenes son altamente sensibles, por favor tenga cuidado cuando utilice servicios basados en la nube.
-
-##### 4. **Visualice el resultado de la segmentación y corrija los errores de segmentación**
-
- 🔴 PARA HACER
-
-* Haga clic en la pestaña **CELLPOSE_CELLS** para comprobar las células individuales que se han segmentado. Haga clic en la pestaña «IMAGE» y luego en «Annotate», puede comprobar la segmentación en toda la imagen.
-
-```{figure} ./img/tutorial_images/Figure5.png
-:width: 600
-:align: center
-
-La herramienta de anotación de Piximi.
-```
-
-* Opcionalmente, aquí puede refinar manualmente la segmentación utilizando las herramientas del anotador. El anotador de Piximi ofrece varias opciones para **añadir**, **restar** o **interseccionar** anotaciones. Además, la **herramienta de selección** le permite **redimensionar** o **eliminar** anotaciones específicas. Para empezar a editar, seleccione imágenes específicas, o todas las imágenes, haciendo clic en la casilla de verificación de la parte superior.
-* Opcionalmente, puede ajustar los canales: Aunque hay dos canales en este experimento, la señal de los núcleos se duplicó en los canales rojo y verde. Este diseño está pensado para ser **color-blind friendly** y para producir un **color magenta** para los núcleos. El **canal verde** también incluye señales citoplasmáticas.
-
-Otra razón para duplicar los canales es que algunos modelos (como **Cellpose** que usamos hoy) requieren que las imágenes de entrada tengan **tres canales**.
-
-* Puede optar por segmentar manualmente las células para generar máscaras para los datos de 'verdad de referencia' (*ground truth*).
-
-##### 5. **Clasificar células**
-
-Razón para hacer esto: Queremos clasificar las “CELLPOSE_CELLS” basándonos en la distribución de la GFP (en Núcleos, citoplasma, o sin GFP) sin etiquetarlas todas y cada una manualmente. Para ello, podemos utilizar la función de clasificación en Piximi, que nos permite entrenar un clasificador utilizando un pequeño subconjunto de datos etiquetados y luego clasificar automáticamente las células restantes.
-
- 🔴 PARA HACER
-
-* Ir a la pestaña **CELLPOSE_CELLS** que muestra los objetos segmentados (flecha 1, figura 36).
-* Hacer clic en la pestaña **Clasificación** del panel izquierdo (flecha 2, figura 36).
-* Cree nuevas categorías haciendo clic en «+ Category». Añadir «Cytoplasmatic_GFP», «Nuclear_GFP», «No_GFP» tres categorías (flecha 3, figura 36).
-* Haga clic en las imágenes que coincidan con sus criterios. Puede seleccionar varias células manteniendo pulsado al tecla **Comamnd (⌘)** en Mac o **Shift** en Linux. Intenta asignar **~20-40 células por categoría**. Una vez seleccionadas, haz clic en **«Categorize»** para asignar las etiquetas a las células seleccionadas.
-
-```{figure} ./img/tutorial_images/Figure6.png
-:width: 600
-:align: center
-
-Clasificando células individuales en base a la presencia de GFP y su localización.
-```
-
-##### 6. **Entrenar el modelo clasificador**
-
- 🔴 PARA HACER
-
-* Haz clic en el icono «Fit Model» para abrir la configuración de los hiperparámetros del modelo. Para el ejercicio de hoy, ajustaremos algunos parámetros:
-* Haga clic en «Architecture Settings» y ajuste la *Model Architecture* a **SimpleCNN**.
-* Actualice las dimensiones de entrada a
- - Filas de entrada: 48
- - Columnas de entrada: 48
- - Canales: 3 (ya que nuestras imágenes están en formato RGB)
-
- (Puede cambiar a otros números como 64, 128)
-
-```{figure} ./img/tutorial_images/Figure7.png
-:width: 600
-:align: center
-
-Configuración del modelo clasificador.
-```
-
-* Haga clic en la pestaña «Dataset Setting» y establezca el porcentaje de entrenamiento (*training percentage*) en 0,75, que reserva el 25% de los datos etiquetados para la validación.
-* Cuando haga clic en **"Fit Classifier"** en Piximi, aparecerán dos gráficos de entrenamiento "**Precisión vs Épocas "** y **"Pérdida vs Épocas "**. Cada gráfico muestra curvas para datos de **entrenamiento** y **validación**.
-* En el gráfico de **precisión**, verás lo bien que está aprendiendo el modelo. Lo ideal es que tanto la precisión de entrenamiento como la de validación aumenten y se mantengan cercanas.
-* En el gráfico de pérdidas, los valores más bajos significan un mejor rendimiento. Si la pérdida de validación empieza a aumentar mientras la pérdida de entrenamiento sigue cayendo, el modelo podría estar sobreajustándose.
-
-Estos gráficos le ayudan a comprender cómo está aprendiendo el modelo y si es necesario realizar ajustes.
-
-##### 7. **Evaluar el modelo:**
-
-🔴 PARA HACER
-
-```{figure} ./img/tutorial_images/Figure8.png
-:width: 400
-:align: center
-
-Entrenamiento y validación del clasificador.
-```
-
-* Haga clic en **«Predict model» (figura 38, flecha 1)** para aplicar el modelo que acabamos de entrenar. Este paso generará predicciones en las células que no hemos anotado.
-* Puede revisar las predicciones en la pestaña CELLPOSE_CELLS y eliminar cualquier categoría mal asignada.
-* Opcionalmente, puede seguir utilizando las etiquetas para refinar la verdad de referencia (*ground truth*) y mejorar el clasificador. Este proceso es parte de la clasificación **Human-in-the-loop**, donde se corrige iterativamente y entrenar el modelo basado en la entrada humana.
-* Haga clic en **«Evaluate model» (figura 38, flecha 2)** para evaluar el modelo que acabamos de entrenar. Las métricas de confusión y de evaluación pueden compararse con la verdad de referencia (*ground truth*).
-* Haga clic en «Accept Prediction (Hold)» (deberás mantener presionado el cursor unos segundos), para asignar las etiquetas predichas a todos los objetos.
-
-##### 8. **Medición**
-
-Una vez que esté satisfecho con la clasificación, procederemos a medir los objetos. El objetivo del ejercicio de hoy es determinar la concentración mínima de Wortmannin necesaria para bloquear la exportación de FOXO1A-GFP desde los núcleos. Para ello, podemos medir la intensidad total de GFP a nivel de imagen o a nivel de objeto.
-
-🔴 PARA HACER
-
-* Haga clic en «Measurement» en la esquina superior derecha.
-* Hacer clic en Tablas (Flecha 1) y seleccionar Imagen y hacer clic en «Confirm» (Flecha 2).
-* Elija «MEASUREMENT» en el panel izquierdo, tenga en cuenta que el paso de medición puede tardar algún tiempo en procesarse.
-* Haga clic en «Category» para incluir todas las categorías en la medición.
-* En «Total», haga clic en «Channell 1» (Flecha 3) para seleccionar la medición para GFP. Verá la medición en la pestaña «DATA GRID». Las mediciones se presentan como valores medios o medianos, y el conjunto de datos completo está disponible al exportar el archivo .csv.
-
-```{figure} ./img/tutorial_images/Figure9.png
-:width: 600
-:align: center
-
-Agregar mediciones.
-```
-
-##### 9. **Visualización**
-
-Después de generar las mediciones, puede trazar las mediciones.
-
-🔴 PARA HACER
-
-* Haga clic en “PLOTS” (Figura 40, Flecha 1) para visualizar las mediciones.
-* Establezca el tipo de trazado en “Swarm” y elija un tema de color basado en su preferencia.
-* Seleccione “Y-axis” como “intensity-total-channel-1” y establezca “SwarmGroup” como “category”; esto generará una curva mostrando cómo varía la intensidad de GFP a través de diferentes categorías (Figura 40, Flecha 2).
-* Seleccionando “Show Statistics” se mostrará la media, así como los límites de confianza superior e inferior, en el gráfico.
-* Opcionalmente, puede experimentar con diferentes tipos de gráficos y ejes para ver si los datos revelan información adicional.
-
-```{figure} ./img/tutorial_images/Figure10.png
-:width: 600
-:align: center
-
-Graficar los resultados.
-```
-
-##### 10. **Exportar los resultados y guardar el proyecto**
-
-🔴 PARA HACER
-
-* Haz clic en «SAVE» en la esquina superior izquierda para guardar todo el proyecto. Verás la animación del logo de Piximi a medida que avanza el guardado.
-
-##### 11. **Información adicional**
-
-Consulta el paper de Piximi: [https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2](https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2)
-
-Consulta la documentación de Piximi:[Documentación de Piximi](https://documentation.piximi.app/intro.html):[https://documentation.piximi.app/intro.html](https://documentation.piximi.app/intro.html)
-
-Informar de fallos/errores o solicitar características [https://github.com/piximi/documentation/issues](https://github.com/piximi/documentation/issues)
\ No newline at end of file
diff --git a/piximi-documentation/translocation_tutorial_pt_BR.md b/piximi-documentation/translocation_tutorial_pt_BR.md
deleted file mode 100644
index 0a08937..0000000
--- a/piximi-documentation/translocation_tutorial_pt_BR.md
+++ /dev/null
@@ -1,230 +0,0 @@
-# Tutorial para iniciantes do Piximi (Portugues)
-
-## Segmentação e classificação sem instalação no navegador
-
-Beth Cimini, Le Liu, Esteban Miglietta, Paula Llanos, Nodar Gogoberidze
-
-Instituto Broad do MIT e Harvard, Cambridge, MA.
-
-### **Informações básicas:**
-
-#### **O que é Piximi?**
-
-Piximi é uma ferramenta moderna de análise de imagens sem programação que utiliza aprendizado profundo. Implementado como um aplicativo web em [https://piximi.app/](https://piximi.app/), o Piximi não requer instalação e pode ser acessado por qualquer navegador moderno. Sua arquitetura exclusiva para clientes preserva a segurança dos dados do pesquisador, executando toda a computação localmente.
-
-O Piximi é interoperável com ferramentas e fluxos de trabalho existentes, suportando importação e exportação de dados e formatos de modelos comuns. A interface intuitiva e o fácil acesso ao Piximi permitem que pesquisadores obtenham insights sobre imagens em apenas alguns minutos. O Piximi visa levar a análise de imagens com aprendizado profundo a uma comunidade mais ampla, eliminando barreiras.
-
-
-\* exceto as segmentações usando Cellpose, que são enviadas para um servidor remoto (com a permissão do usuário).
-
-Funcionalidades principais: **Anotador, Segmentador, Classificador, Medições.**
-
-#### **Objetivo do exercício**
-
-Neste exercício, você se familiarizará com as principais funcionalidades do Piximi: anotação, segmentação, classificação, mensuração e visualização, e o utilizará para analisar um conjunto de imagens de um experimento de translocação. O objetivo deste experimento é determinar a **menor dose efetiva** de Wortmannin necessária para induzir a localização nuclear de FOXO1A marcada com GFP (Figura 21). Você segmentará as imagens usando um dos modelos de aprendizado profundo disponíveis no Piximi, verificará e selecionará a segmentação e, em seguida, treinará um classificador de imagens para classificar as células individuais como tendo "GFP nuclear", "GFP citoplasmática" ou "sem GFP". Por fim, você fará medições e as plotará para responder à pergunta biológica.
-
-
-#### **Contexto do experimento**
-
-Neste experimento, pesquisadores obtiveram imagens de células U2OS de osteossarcoma (câncer ósseo) fixadas expressando uma proteína de fusão FOXO1A-GFP e coraram DAPI para marcar os núcleos. FOXO1 é um fator de transcrição que desempenha um papel fundamental na regulação da gliconeogênese e glicogenólise por meio da sinalização da insulina. FOXO1A transita dinamicamente entre o citoplasma e o núcleo em resposta a vários estímulos. A wortmanina, um inibidor da PI3K, pode bloquear a exportação nuclear, resultando no acúmulo de FOXO1A no núcleo.
-
-
-
-```{figure} ./img/tutorial_images/Figure1.png
-:largura: 300
-:alinhar: centro
-
-Representação esquemática do mecanismo FOXO1A
-```
-
-#### **Materiais necessários para este exercício**
-
-Os materiais necessários para este exercício podem ser baixados de: [PiximiTutorial](./downloads/Piximi_Translocation_Tutorial_RGB.zip). O arquivo “Piximi Translocation Tutorial RGB.zip” contém um projeto Piximi, incluindo todas as imagens, já rotuladas com o tratamento correspondente (concentração de Wortmannin ou Controle). Baixe este arquivo, mas **NÃO o descompacte**!
-
-#### **Instruções do exercício**
-
-Leia os passos abaixo e siga as instruções onde indicado. Os passos em que você precisa encontrar uma solução estão marcados com 🔴 PARA FAZER.
-
-##### 1. **Carregue o projeto Piximi**
-
-🔴 PARA FAZER
-
-* Inicie o Piximi acessando: [https://piximi.app/](https://piximi.app/)
-
-* Carregue o projeto de exemplo: Clique em “Abrir” \- “Projeto” \- “Projeto do Zip”, como mostrado na figura 22, para carregar um arquivo de projeto para este tutorial do Zip. Você também pode alterar o nome do projeto no painel superior esquerdo, como “Exercício Piximi”. Conforme ele é carregado, você pode ver a progressão no logotipo no canto superior esquerdo.
-
-```{figure} ./img/tutorial_images/Figure2.png
-:width: 600
-:align: center
-
-Carregando um arquivo de projeto.
-```
-
-##### 2. **Verifique as imagens carregadas e explore a interface do Piximi**
-
-Estas 17 imagens representam tratamentos com Wortmannin em oito concentrações diferentes (expressas em nM), bem como tratamentos controles (0 nM). Observe que o canal DAPI (Núcleos) é mostrado em magenta e que o canal GFP (FOXOA1) é mostrado em verde.
-
-Ao passar o mouse sobre a imagem, rótulos coloridos são exibidos no canto esquerdo das imagens. Essas anotações são dos metadados do arquivo compactado que acabamos de enviar. Neste tutorial, os diferentes rótulos coloridos indicam a concentração de Wortmannin, enquanto os números representam o número de imagens em cada categoria.
-
-Opcionalmente, você pode anotar as imagens manualmente clicando em "+ Categoria", inserindo seu rótulo e, em seguida, selecionando a imagem clicando nas imagens e anotando as imagens selecionadas clicando em **"Categorizar"**. Neste tutorial, pularemos esta etapa, pois os rótulos já foram carregados no início.
-
-```{figure} ./img/tutorial_images/Figure3.png
-:largura: 600
-:alinhar: centro
-
-Explorando as imagens e rótulos.
-```
-
-##### 3. **Segmentar Células - descubra as células a partir do fundo**
-
-🔴 PARA FAZER
-
-* Para iniciar a previsão em todas as imagens, clique em “Selecionar Todas as Imagens” no painel superior, conforme mostrado na Figura 23.
-* Altere a Tarefa de Aprendizagem para “SEGMENTAÇÃO” (Figura 24, Seta 1).
-
-* Clique em “+ CARREGAR MODELO” (Seta 2) e a janela será exibida, permitindo que você escolha um modelo pré-treinado (Seta 3). Para o exercício de hoje, selecione “Cellpose” (Seta 4). Mais informações sobre o modelo suportado podem ser encontradas [aqui](https://documentation.piximi.app/segmentation.html).
-* Clique em “Abrir Modelo de Segmentação” (Seta 5) para carregar seu modelo e selecioná-lo. Por fim, clique em “Prever Modelo” (Seta 5). Você verá o progresso da previsão exibido no canto superior esquerdo, abaixo do logotipo da Piximi.
-* A segmentação levará alguns minutos para ser concluída.
-
-```{figure} ./img/tutorial_images/Figure4.png
-:width: 600
-:align: center
-
-Carregando um modelo de segmentação.
-```
-
-Observe que as etapas anteriores foram executadas em sua máquina local, o que significa que suas imagens estão armazenadas localmente. No entanto, a inferência do Cellpose é executada na nuvem, o que significa que suas imagens serão enviadas para processamento. Se suas imagens forem altamente sensíveis, tenha cuidado ao usar serviços baseados em nuvem.
-
-##### 4. **Visualize o resultado da segmentação e corrija os erros de segmentação**
-
-🔴 PARA FAZER
-
-* Clique na aba **CELLPOSE_CELLS** para verificar as células individuais que foram segmentadas. Clique na aba “IMAGEM” e depois em “Anotar” para verificar a segmentação de toda a imagem.
-
-```{figure} ./img/tutorial_images/Figure5.png
-:largura: 600
-:alinhar: centro
-
-Ferramenta de anotação do Piximi.
-```
-
-* Opcionalmente, aqui você pode refinar manualmente a segmentação usando as ferramentas do anotador. O anotador Piximi oferece diversas opções para **adicionar**, **subtrair** ou **interseccionar** anotações. Além disso, a **ferramenta de seleção** permite **redimensionar** ou **excluir** anotações específicas. Para começar a editar, selecione imagens específicas ou todas clicando na caixa de seleção na parte superior.
-* Opcionalmente, você pode ajustar os canais: embora existam dois canais neste experimento, o sinal dos núcleos é duplicado nos canais vermelho e verde. Este projeto foi projetado para ser **compatível com daltonismo** e produzir uma **cor magenta** para os núcleos. O **canal verde** também inclui sinais citoplasmáticos.
-
-Outro motivo para duplicar os canais é que alguns modelos — como o **modelo Cellpose** que usamos hoje — exigem uma entrada de **três canais**.
-
-* Você pode optar por segmentar manualmente as células para gerar máscaras para dados de verdade básica.
-
-##### **Classificar células**
-
-Motivo para isso: Queremos classificar as 'CELLPOSE\_CELLS' com base na distribuição de GFP (em núcleos, citoplasma ou sem GFP) sem rotular todas elas manualmente. Para isso, podemos usar a função de classificação do Piximi, que nos permite treinar um classificador usando um pequeno subconjunto de dados rotulados e, em seguida, classificar automaticamente as células restantes.
-
-🔴 PARA FAZER
-
-* Acesse a aba **CELLPOSE_CELLS** que exibe os objetos segmentados (seta 1, figura 26)
-* Clique na aba **Classificação** no painel esquerdo (seta 2, figura 26).
-* Crie novas categorias clicando em **“+ Categoria”**. Adicione as três categorias “Cytoplasmatic_GFP”, “Nuclear_GFP” e “No_GFP” (Seta 3, Figura 26).
-* Clique nas imagens que correspondem aos seus critérios. Você pode selecionar várias células pressionando **Command (⌘)** no Mac ou **Shift** no Linux. Tente atribuir **\~20–40 células por categoria**. Após selecionar, clique em **“Categorizar”** para atribuir os rótulos às células selecionadas.
-
-```{figure} ./img/tutorial_images/Figure6.png
-:width: 600
-:align: center
-
-Classificando células individuais com base na presença e localização de GFP.
-```
-
-##### 6. **Treine o modelo do Classificador**
-
-🔴 PARA FAZER
-
-* Clique no ícone "
- Ajustar Modelo" para abrir as configurações de hiperparâmetros do modelo. Para o exercício de hoje, ajustaremos alguns parâmetros:
-* Clique em “Configurações de arquitetura” e defina a arquitetura do modelo como **SimpleCNN**.
-* Atualize as dimensões de entrada para:
- - Linhas de entrada: 48
- - Colunas de entrada: 48
- - Canais: 3 (já que nossas imagens estão no formato RGB)
-
- (Você pode mudar para outros números, como 64, 128)
-
-```{figure} ./img/tutorial_images/Figure7.png
-:largura: 600
-:alinhar: centro
-
-Configuração do modelo classificador.
-```
-
-* Clique na aba “Configuração do Conjunto de Dados” e defina a Porcentagem de Treinamento como 0,75, o que reserva 25% dos dados rotulados para validação.
-* Ao clicar em **"Ajustar Classificador"** no Piximi, dois gráficos de treinamento aparecerão: "**Precisão vs. Épocas'' e **"Perda vs. Épocas''. Cada gráfico mostra curvas para os dados de **treinamento** e **validação**.
-* No **gráfico de precisão**, você verá o quão bem o modelo está aprendendo. Idealmente, a precisão tanto do treinamento quanto da validação deve aumentar e permanecer próxima.
-* No **gráfico de perdas**, valores menores significam melhor desempenho. Se a perda de validação começar a aumentar enquanto a perda de treinamento continua caindo, o modelo pode estar com sobreajuste.
-
-Esses gráficos ajudam a entender como o modelo está aprendendo e se ajustes são necessários.
-
-##### 7. **Avaliar modelo:**
-
-🔴 A FAZER
-
-```{figure} ./img/tutorial_images/Figure8.png
-:width: 400
-:align: center
-
-Treinamento e validação do classificador.
-```
-
-* Clique em **“Prever Modelo” (figura 28, seta 1)** para aplicar o modelo que acabamos de treinar. Esta etapa gerará previsões nas células que não anotamos.
-* Você pode revisar as previsões na guia CELLPOSE_CELLS e excluir quaisquer categorias atribuídas incorretamente.
-* Opcionalmente, você pode continuar usando os rótulos para refinar a verdade básica e aprimorar o classificador. Esse processo faz parte da **classificação humana no ciclo**, na qual você corrige e treina o modelo iterativamente com base na entrada humana.
-* Clique em **“Avaliar Modelo” (figura 28, seta 2)** para avaliar o modelo que acabamos de treinar. As métricas de confusão e de avaliação podem ser comparadas com a verdade básica.
-* Clique em "Aceitar previsão (Manter)" para atribuir os rótulos previstos a todos os objetos.
-
-##### 8. **Medição**
-
-Assim que estiver satisfeito com a classificação, prosseguiremos com a medição dos objetos. O objetivo do exercício de hoje é determinar a concentração mínima de Wortmannin necessária para bloquear a exportação de FOXO1A-GFP dos núcleos. Para isso, podemos medir a intensidade total de GFP na imagem ou no objeto.
-
-🔴 PARA FAZER
-
-* Clique em “Medição” no canto superior direito.
-* Clique em Tabelas (Seta 1), selecione Imagem e clique em “Confirmar” (Seta 2).
-* Selecione "MEDIÇÃO" no painel esquerdo. Observe que a etapa de medição pode levar algum tempo para ser processada.
-* Clique em "Categoria" para incluir todas as categorias na medição.
-* Em "Total", clique em "Canal 1" (Seta 3) para selecionar a medição para GFP. Você verá a medição na aba "GRADE DE DADOS". As medições são apresentadas como valores médios ou medianos, e o conjunto de dados completo está disponível ao exportar o arquivo .csv.
-
-```{figure} ./img/tutorial_images/Figure9.png
-:largura: 600
-:alinhar: centro
-
-Adicione medidas.
-```
-
-##### 9. **Visualização**
-
-Após gerar as medições, você pode plotá-las.
-
-🔴 PARA FAZER
-
-* Clique em "PLOTS" (Figura 30, Seta 1) para visualizar as medições.
-* Defina o tipo de gráfico como "Swarm" e escolha um tema de cores de acordo com sua preferência.
-* Selecione "Y-axis" como "intensity-total-channel-1" e defina "SwarmGroup" como "category"; isso gerará uma curva mostrando como a intensidade da GFP varia entre as diferentes categorias (Figura 30, Seta 2).
-* Selecionar "Show Statistics" exibirá a média, bem como os limites de confiança superior e inferior, no gráfico.
-* Opcionalmente, você pode experimentar diferentes tipos de gráfico e eixos para ver se os dados revelam insights adicionais.
-
-```{figure} ./img/tutorial_images/Figure10.png
-:width: 600
-:align: center
-
-Resultados do gráfico.
-```
-
-##### 10. **Exportar resultados e salvar o projeto**
-
-🔴 PARA FAZER
-
-* Clique em "SALVAR" no canto superior esquerdo para salvar o projeto inteiro. Você verá a animação do logotipo do Piximi conforme o salvamento avança
.
-
-##### 11. **Informações de apoio**
-
-Confira o artigo do Piximi: [https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2](https://www.biorxiv.org/content/10.1101/2024.06.03.597232v2)
-
-Confira a documentação do Piximi:[Documentação do Piximi](https://documentation.piximi.app/intro.html):[https://documentation.piximi.app/intro.html](https://documentation.piximi.app/intro.html)
-
-Relatar bugs/erros ou solicitar recursos [https://github.com/piximi/documentation/issues](https://github.com/piximi/documentation/issues)