Artyom committed on
Commit
6721043
1 Parent(s): e91104d
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. IIR-Lab/Dockerfile +17 -0
  2. IIR-Lab/ISP_pipeline/.gitignore +129 -0
  3. IIR-Lab/ISP_pipeline/Dockerfile +12 -0
  4. IIR-Lab/ISP_pipeline/LICENSE +21 -0
  5. IIR-Lab/ISP_pipeline/__pycache__/debayer.cpython-312.pyc +0 -0
  6. IIR-Lab/ISP_pipeline/__pycache__/debayer.cpython-39.pyc +0 -0
  7. IIR-Lab/ISP_pipeline/__pycache__/imaging.cpython-312.pyc +0 -0
  8. IIR-Lab/ISP_pipeline/__pycache__/imaging.cpython-39.pyc +0 -0
  9. IIR-Lab/ISP_pipeline/__pycache__/process_pngs_isp.cpython-312.pyc +0 -0
  10. IIR-Lab/ISP_pipeline/__pycache__/process_pngs_isp.cpython-39.pyc +0 -0
  11. IIR-Lab/ISP_pipeline/__pycache__/utility.cpython-312.pyc +0 -0
  12. IIR-Lab/ISP_pipeline/__pycache__/utility.cpython-39.pyc +0 -0
  13. IIR-Lab/ISP_pipeline/cfa_pattern_change.py +74 -0
  14. IIR-Lab/ISP_pipeline/debayer.py +1295 -0
  15. IIR-Lab/ISP_pipeline/docker_guidelines.md +29 -0
  16. IIR-Lab/ISP_pipeline/imaging.py +1293 -0
  17. IIR-Lab/ISP_pipeline/lsc_table_r_gr_gb_b_2.npy +3 -0
  18. IIR-Lab/ISP_pipeline/process_pngs_isp.py +276 -0
  19. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__init__.py +3 -0
  20. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/__init__.cpython-312.pyc +0 -0
  21. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/__init__.cpython-39.pyc +0 -0
  22. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/arch_util.cpython-312.pyc +0 -0
  23. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/color.cpython-312.pyc +0 -0
  24. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/color.cpython-39.pyc +0 -0
  25. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/csrnet_network.cpython-312.pyc +0 -0
  26. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/csrnet_network.cpython-39.pyc +0 -0
  27. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_data_formats.cpython-312.pyc +0 -0
  28. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_data_formats.cpython-39.pyc +0 -0
  29. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_utils.cpython-312.pyc +0 -0
  30. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_utils.cpython-39.pyc +0 -0
  31. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/fs.cpython-312.pyc +0 -0
  32. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/fs.cpython-39.pyc +0 -0
  33. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/io.cpython-312.pyc +0 -0
  34. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/io.cpython-39.pyc +0 -0
  35. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/lut_network.cpython-312.pyc +0 -0
  36. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/misc.cpython-312.pyc +0 -0
  37. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/misc.cpython-39.pyc +0 -0
  38. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/optim.cpython-312.pyc +0 -0
  39. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/optim.cpython-39.pyc +0 -0
  40. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline.cpython-312.pyc +0 -0
  41. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline.cpython-39.pyc +0 -0
  42. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_bm3d.cpython-312.pyc +0 -0
  43. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_bm3d.cpython-39.pyc +0 -0
  44. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_utils.cpython-312.pyc +0 -0
  45. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_utils.cpython-39.pyc +0 -0
  46. IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/refine_network.cpython-312.pyc +0 -0
  47. IIR-Lab/ISP_pipeline/raw_prc_pipeline/arch_util.py +626 -0
  48. IIR-Lab/ISP_pipeline/raw_prc_pipeline/color.py +306 -0
  49. IIR-Lab/ISP_pipeline/raw_prc_pipeline/csrnet_network.py +76 -0
  50. IIR-Lab/ISP_pipeline/raw_prc_pipeline/exif_data_formats.py +22 -0
IIR-Lab/Dockerfile ADDED
@@ -0,0 +1,17 @@
+ FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
+
+ ENV TZ=Asia
+ ARG DEBIAN_FRONTEND=noninteractive
+
+ RUN apt-get update && apt-get install -y \
+     libpng-dev libjpeg-dev \
+     libopencv-dev ffmpeg \
+     libgl1-mesa-glx
+
+ COPY requirements.txt .
+ RUN python -m pip install --upgrade pip
+ RUN pip install --no-cache -r requirements.txt
+
+ COPY . /nightimage
+ RUN chmod +x /nightimage/run.sh
+ WORKDIR /nightimage
IIR-Lab/ISP_pipeline/.gitignore ADDED
@@ -0,0 +1,129 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
IIR-Lab/ISP_pipeline/Dockerfile ADDED
@@ -0,0 +1,12 @@
+ FROM python:3.9
+
+ RUN apt-get update && apt-get install -y \
+     libpng-dev libjpeg-dev \
+     libopencv-dev ffmpeg \
+     libgl1-mesa-glx
+
+ COPY requirements.txt .
+ RUN python -m pip install --no-cache -r requirements.txt
+
+ COPY . /nightimaging
+ WORKDIR /nightimaging
IIR-Lab/ISP_pipeline/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 Color Reproduction and Synthesis
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
IIR-Lab/ISP_pipeline/__pycache__/debayer.cpython-312.pyc ADDED
Binary file (54.5 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/debayer.cpython-39.pyc ADDED
Binary file (21.2 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/imaging.cpython-312.pyc ADDED
Binary file (60.6 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/imaging.cpython-39.pyc ADDED
Binary file (32.1 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/process_pngs_isp.cpython-312.pyc ADDED
Binary file (12.9 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/process_pngs_isp.cpython-39.pyc ADDED
Binary file (6.02 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/utility.cpython-312.pyc ADDED
Binary file (51.6 kB)
 
IIR-Lab/ISP_pipeline/__pycache__/utility.cpython-39.pyc ADDED
Binary file (26.6 kB)
 
IIR-Lab/ISP_pipeline/cfa_pattern_change.py ADDED
@@ -0,0 +1,74 @@
+ import numpy as np
+ import os
+ import json
+ import cv2
+
+ def change_cfa_pattern(img):
+     raw_colors = np.asarray([3, 1, 2, 0]).reshape((2, 2))
+     changed_raw_colors = np.asarray([0, 1, 2, 3]).reshape((2, 2))
+     demosaiced_image = np.zeros((img.shape[0] // 2, img.shape[1] // 2, 4))
+     for i in range(2):
+         for j in range(2):
+             ch = raw_colors[i, j]
+             demosaiced_image[:, :, ch] = img[i::2, j::2]
+     for i in range(2):
+         for j in range(2):
+             ch1 = changed_raw_colors[i, j]
+             img[i::2, j::2] = demosaiced_image[:, :, ch1]
+
+     return img
+
+ def rggb_raw(raw):
+     # pack RGGB Bayer raw to 4 channels
+     H, W = raw.shape
+     raw = raw[None, ...]
+     raw_pack = np.concatenate((raw[:, 0:H:2, 0:W:2],
+                                raw[:, 0:H:2, 1:W:2],
+                                raw[:, 1:H:2, 0:W:2],
+                                raw[:, 1:H:2, 1:W:2]), axis=0)
+     # tmp = rggb[...,0]
+     # rggb[...,0] = rggb[...,-1]
+     # rggb[...,-1] = tmp
+     return raw_pack
+
+ def raw_rggb(raws):
+     # depack 4 channels raw to RGGB Bayer
+     C, H, W = raws.shape
+     output = np.zeros((H * 2, W * 2)).astype(np.uint16)
+
+     output[0:2 * H:2, 0:2 * W:2] = raws[0:1, :, :]
+     output[0:2 * H:2, 1:2 * W:2] = raws[1:2, :, :]
+     output[1:2 * H:2, 0:2 * W:2] = raws[2:3, :, :]
+     output[1:2 * H:2, 1:2 * W:2] = raws[3:4, :, :]
+
+     return output
+
+ if __name__ == "__main__":
+     json_path = "/data1/02_data/Train_Data/"
+     file_name = os.listdir(json_path)
+     json_list = []
+     for file_name_all in file_name:
+         if file_name_all.endswith(".json"):
+             json_list.append(json_path + file_name_all)
+     a = []
+     for i in range(len(json_list)):
+         with open(json_list[i], 'r', encoding='UTF-8') as f:
+             result = json.load(f)
+             # a,b = result["noise_profile"]
+             # black = result["white_level"]
+             cfa_pattern = result["cfa_pattern"]
+             if cfa_pattern[0] == 2:
+                 a.append(json_list[i])
+     for j in range(len(a)):
+         pic_name, _ = os.path.splitext(a[j])
+         img = cv2.imread(pic_name + ".png", cv2.IMREAD_UNCHANGED)
+         # img1 = cv2.imread(pic_name+".png", cv2.IMREAD_UNCHANGED)
+         # test = img - img1
+         # print(test)
+         changed_img = change_cfa_pattern(img=img)
+         # cv2.imwrite(pic_name+"test1.png",changed_img)
+         np.save(pic_name + "origin.npy", img)
+         np.save(pic_name + "changed.npy", changed_img)
+     # np.save("./json_all.npy",result)
+
+
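A minimal round-trip sketch of the pack/unpack pair above (an illustration, not part of the commit; it assumes cfa_pattern_change.py is importable and uses made-up test data):

import numpy as np
from cfa_pattern_change import rggb_raw, raw_rggb

# Packing an RGGB mosaic into four per-site planes and unpacking it
# again should reproduce the input exactly.
raw = np.random.randint(0, 1024, size=(8, 8), dtype=np.uint16)
packed = rggb_raw(raw)         # shape (4, 4, 4): R, G1, G2, B planes
restored = raw_rggb(packed)    # back to the (8, 8) Bayer mosaic
assert np.array_equal(raw, restored)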
IIR-Lab/ISP_pipeline/debayer.py ADDED
@@ -0,0 +1,1295 @@
+ import numpy as np
+ import math
+ import time
+ import utility
+ from scipy import signal
+
+ # =============================================================
+ # function: debayer_mhc
+ # demosaicing using Malvar-He-Cutler algorithm
+ # http://www.ipol.im/pub/art/2011/g_mhcd/
+ # =============================================================
+ def debayer_mhc(raw, bayer_pattern="rggb", clip_range=[0, 65535], timeshow=False):
+
+     # convert to float32 in case it was not
+     raw = np.float32(raw)
+
+     # dimensions
+     width, height = utility.helpers(raw).get_width_height()
+
+     # number of pixels to pad
+     no_of_pixel_pad = 2
+     raw = np.pad(raw, \
+                  (no_of_pixel_pad, no_of_pixel_pad), \
+                  'reflect')  # reflect would not repeat the border value
+
+     # allocate space for the R, G, B planes
+     R = np.empty((height + no_of_pixel_pad * 2, width + no_of_pixel_pad * 2), dtype=np.float32)
+     G = np.empty((height + no_of_pixel_pad * 2, width + no_of_pixel_pad * 2), dtype=np.float32)
+     B = np.empty((height + no_of_pixel_pad * 2, width + no_of_pixel_pad * 2), dtype=np.float32)
+
+     # create a RGB output
+     demosaic_out = np.empty((height, width, 3), dtype=np.float32)
+
+     # fill up the directly available values according to the Bayer pattern
+     if (bayer_pattern == "rggb"):
+
+         G[::2, 1::2] = raw[::2, 1::2]
+         G[1::2, ::2] = raw[1::2, ::2]
+         R[::2, ::2] = raw[::2, ::2]
+         B[1::2, 1::2] = raw[1::2, 1::2]
+
+         # Green channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # G at Red location
+                 if (((i % 2) == 0) and ((j % 2) == 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i, j], 2. * G[i, j+1], -1. * R[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * R[i+2, j]])
+                 # G at Blue location
+                 elif (((i % 2) != 0) and ((j % 2) != 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i, j], 2. * G[i, j+1], -1. * B[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * B[i+2, j]])
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Green: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+         # Red and Blue channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # Green locations in Red rows
+                 if (((i % 2) == 0) and ((j % 2) != 0)):
+                     # R at Green locations in Red rows
+                     R[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i, j], 4. * R[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                     # B at Green locations in Red rows
+                     B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * B[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                 # Green locations in Blue rows
+                 elif (((i % 2) != 0) and ((j % 2) == 0)):
+
+                     # R at Green locations in Blue rows
+                     R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                     # B at Green locations in Blue rows
+                     B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i, j], 4. * B[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                 # R at Blue locations
+                 elif (((i % 2) != 0) and ((j % 2) != 0)):
+                     R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
+                         2. * R[i-1, j-1], 2. * R[i-1, j+1], \
+                         -1.5 * B[i, j-2], 6. * B[i, j], -1.5 * B[i, j+2], \
+                         2. * R[i+1, j-1], 2. * R[i+1, j+1], \
+                         -1.5 * B[i+2, j]])
+
+                 # B at Red locations
+                 elif (((i % 2) == 0) and ((j % 2) == 0)):
+                     B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
+                         2. * B[i-1, j-1], 2. * B[i-1, j+1], \
+                         -1.5 * R[i, j-2], 6. * R[i, j], -1.5 * R[i, j+2], \
+                         2. * B[i+1, j-1], 2. * B[i+1, j+1], \
+                         -1.5 * R[i+2, j]])
+
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+
+     elif (bayer_pattern == "gbrg"):
+
+         G[::2, ::2] = raw[::2, ::2]
+         G[1::2, 1::2] = raw[1::2, 1::2]
+         R[1::2, ::2] = raw[1::2, ::2]
+         B[::2, 1::2] = raw[::2, 1::2]
+
+         # Green channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # G at Red location
+                 if (((i % 2) != 0) and ((j % 2) == 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i, j], 2. * G[i, j+1], -1. * R[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * R[i+2, j]])
+                 # G at Blue location
+                 elif (((i % 2) == 0) and ((j % 2) != 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i, j], 2. * G[i, j+1], -1. * B[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * B[i+2, j]])
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Green: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+         # Red and Blue channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # Green locations in Red rows
+                 if (((i % 2) != 0) and ((j % 2) != 0)):
+                     # R at Green locations in Red rows
+                     R[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i, j], 4. * R[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                     # B at Green locations in Red rows
+                     B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * B[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                 # Green locations in Blue rows
+                 elif (((i % 2) == 0) and ((j % 2) == 0)):
+
+                     # R at Green locations in Blue rows
+                     R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                     # B at Green locations in Blue rows
+                     B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i, j], 4. * B[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                 # R at Blue locations
+                 elif (((i % 2) == 0) and ((j % 2) != 0)):
+                     R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
+                         2. * R[i-1, j-1], 2. * R[i-1, j+1], \
+                         -1.5 * B[i, j-2], 6. * B[i, j], -1.5 * B[i, j+2], \
+                         2. * R[i+1, j-1], 2. * R[i+1, j+1], \
+                         -1.5 * B[i+2, j]])
+
+                 # B at Red locations
+                 elif (((i % 2) != 0) and ((j % 2) == 0)):
+                     B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
+                         2. * B[i-1, j-1], 2. * B[i-1, j+1], \
+                         -1.5 * R[i, j-2], 6. * R[i, j], -1.5 * R[i, j+2], \
+                         2. * B[i+1, j-1], 2. * B[i+1, j+1], \
+                         -1.5 * R[i+2, j]])
+
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+     elif (bayer_pattern == "grbg"):
+
+         G[::2, ::2] = raw[::2, ::2]
+         G[1::2, 1::2] = raw[1::2, 1::2]
+         R[::2, 1::2] = raw[::2, 1::2]
+         B[1::2, ::2] = raw[1::2, ::2]
+
+         # Green channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # G at Red location
+                 if (((i % 2) == 0) and ((j % 2) != 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i, j], 2. * G[i, j+1], -1. * R[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * R[i+2, j]])
+                 # G at Blue location
+                 elif (((i % 2) != 0) and ((j % 2) == 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i, j], 2. * G[i, j+1], -1. * B[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * B[i+2, j]])
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Green: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+         # Red and Blue channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # Green locations in Red rows
+                 if (((i % 2) == 0) and ((j % 2) == 0)):
+                     # R at Green locations in Red rows
+                     R[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i, j], 4. * R[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                     # B at Green locations in Red rows
+                     B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * B[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                 # Green locations in Blue rows
+                 elif (((i % 2) != 0) and ((j % 2) != 0)):
+
+                     # R at Green locations in Blue rows
+                     R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                     # B at Green locations in Blue rows
+                     B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i, j], 4. * B[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                 # R at Blue locations
+                 elif (((i % 2) != 0) and ((j % 2) == 0)):
+                     R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
+                         2. * R[i-1, j-1], 2. * R[i-1, j+1], \
+                         -1.5 * B[i, j-2], 6. * B[i, j], -1.5 * B[i, j+2], \
+                         2. * R[i+1, j-1], 2. * R[i+1, j+1], \
+                         -1.5 * B[i+2, j]])
+
+                 # B at Red locations
+                 elif (((i % 2) == 0) and ((j % 2) != 0)):
+                     B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
+                         2. * B[i-1, j-1], 2. * B[i-1, j+1], \
+                         -1.5 * R[i, j-2], 6. * R[i, j], -1.5 * R[i, j+2], \
+                         2. * B[i+1, j-1], 2. * B[i+1, j+1], \
+                         -1.5 * R[i+2, j]])
+
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+     elif (bayer_pattern == "bggr"):
+
+         G[::2, 1::2] = raw[::2, 1::2]
+         G[1::2, ::2] = raw[1::2, ::2]
+         R[1::2, 1::2] = raw[1::2, 1::2]
+         B[::2, ::2] = raw[::2, ::2]
+
+         # Green channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # G at Red location
+                 if (((i % 2) != 0) and ((j % 2) != 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i, j], 2. * G[i, j+1], -1. * R[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * R[i+2, j]])
+                 # G at Blue location
+                 elif (((i % 2) == 0) and ((j % 2) == 0)):
+                     G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
+                         2. * G[i-1, j], \
+                         -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i, j], 2. * G[i, j+1], -1. * B[i, j+2], \
+                         2. * G[i+1, j], \
+                         -1. * B[i+2, j]])
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Green: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+         # Red and Blue channel
+         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+
+             # to display progress
+             t0 = time.process_time()
+
+             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+                 # Green locations in Red rows
+                 if (((i % 2) != 0) and ((j % 2) == 0)):
+                     # R at Green locations in Red rows
+                     R[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i, j], 4. * R[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                     # B at Green locations in Red rows
+                     B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * B[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                 # Green locations in Blue rows
+                 elif (((i % 2) == 0) and ((j % 2) != 0)):
+
+                     # R at Green locations in Blue rows
+                     R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+                         -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
+                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+                         -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
+                         -1. * G[i+2, j]])
+
+                     # B at Green locations in Blue rows
+                     B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+                         -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i, j], 4. * B[i, j+1], -1. * G[i, j+2], \
+                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+                         .5 * G[i+2, j]])
+
+                 # R at Blue locations
+                 elif (((i % 2) == 0) and ((j % 2) == 0)):
+                     R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
+                         2. * R[i-1, j-1], 2. * R[i-1, j+1], \
+                         -1.5 * B[i, j-2], 6. * B[i, j], -1.5 * B[i, j+2], \
+                         2. * R[i+1, j-1], 2. * R[i+1, j+1], \
+                         -1.5 * B[i+2, j]])
+
+                 # B at Red locations
+                 elif (((i % 2) != 0) and ((j % 2) != 0)):
+                     B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
+                         2. * B[i-1, j-1], 2. * B[i-1, j+1], \
+                         -1.5 * R[i, j-2], 6. * R[i, j], -1.5 * R[i, j+2], \
+                         2. * B[i+1, j-1], 2. * B[i+1, j+1], \
+                         -1.5 * R[i+2, j]])
+
+             if (timeshow):
+                 elapsed_time = time.process_time() - t0
+                 print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
+                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+
+     else:
+         print("Invalid bayer pattern. Valid pattern can be rggb, gbrg, grbg, bggr")
+         return demosaic_out  # warning: demosaic_out is an uninitialised np.empty buffer here
+
+     # Fill up the RGB output with interpolated values
+     demosaic_out[0:height, 0:width, 0] = R[no_of_pixel_pad : height + no_of_pixel_pad, \
+                                            no_of_pixel_pad : width + no_of_pixel_pad]
+     demosaic_out[0:height, 0:width, 1] = G[no_of_pixel_pad : height + no_of_pixel_pad, \
+                                            no_of_pixel_pad : width + no_of_pixel_pad]
+     demosaic_out[0:height, 0:width, 2] = B[no_of_pixel_pad : height + no_of_pixel_pad, \
+                                            no_of_pixel_pad : width + no_of_pixel_pad]
+
+     demosaic_out = np.clip(demosaic_out, clip_range[0], clip_range[1])
+     return demosaic_out
+
+
+ def fill_channel_directional_weight(data, bayer_pattern):
+
+     #== Calculate the directional weights (weight_N, weight_E, weight_S, weight_W.
+     # where N, E, S, W stand for north, east, south, and west.)
+     data = np.asarray(data)
+     v = np.asarray(signal.convolve2d(data, [[1], [0], [-1]], mode="same", boundary="symm"))
+     h = np.asarray(signal.convolve2d(data, [[1, 0, -1]], mode="same", boundary="symm"))
+
+     weight_N = np.zeros(np.shape(data), dtype=np.float32)
+     weight_E = np.zeros(np.shape(data), dtype=np.float32)
+     weight_S = np.zeros(np.shape(data), dtype=np.float32)
+     weight_W = np.zeros(np.shape(data), dtype=np.float32)
+
+     value_N = np.zeros(np.shape(data), dtype=np.float32)
+     value_E = np.zeros(np.shape(data), dtype=np.float32)
+     value_S = np.zeros(np.shape(data), dtype=np.float32)
+     value_W = np.zeros(np.shape(data), dtype=np.float32)
+
+     if ((bayer_pattern == "rggb") or (bayer_pattern == "bggr")):
+
+
+         # note that in the following the locations in the comments are given
+         # assuming the bayer_pattern rggb
+
+         #== CALCULATE WEIGHTS IN B LOCATIONS
+         weight_N[1::2, 1::2] = np.abs(v[1::2, 1::2]) + np.abs(v[::2, 1::2])
+
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp_h_b = np.hstack((h, np.atleast_2d(h[:, -2]).T))
+         weight_E[1::2, 1::2] = np.abs(h[1::2, 1::2]) + np.abs(temp_h_b[1::2, 2::2])
+
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp_v_b = np.vstack((v, v[-1]))
+         weight_S[1::2, 1::2] = np.abs(v[1::2, 1::2]) + np.abs(temp_v_b[2::2, 1::2])
+         weight_W[1::2, 1::2] = np.abs(h[1::2, 1::2]) + np.abs(h[1::2, ::2])
+
+         #== CALCULATE WEIGHTS IN R LOCATIONS
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp_v_r = np.delete(np.vstack((v[1], v)), -1, 0)
+         weight_N[::2, ::2] = np.abs(v[::2, ::2]) + np.abs(temp_v_r[::2, ::2])
+
+         weight_E[::2, ::2] = np.abs(h[::2, ::2]) + np.abs(h[::2, 1::2])
+
+         weight_S[::2, ::2] = np.abs(v[::2, ::2]) + np.abs(v[1::2, ::2])
+
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp_h_r = np.delete(np.hstack((np.atleast_2d(h[:, 1]).T, h)), -1, 1)
+         weight_W[::2, ::2] = np.abs(h[::2, ::2]) + np.abs(temp_h_r[::2, ::2])
+
+         weight_N = np.divide(1., 1. + weight_N)
+         weight_E = np.divide(1., 1. + weight_E)
+         weight_S = np.divide(1., 1. + weight_S)
+         weight_W = np.divide(1., 1. + weight_W)
+
+         #== CALCULATE DIRECTIONAL ESTIMATES IN B LOCATIONS
+         value_N[1::2, 1::2] = data[::2, 1::2] + v[::2, 1::2] / 2.
+
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((data, np.atleast_2d(data[:, -2]).T))
+         value_E[1::2, 1::2] = temp[1::2, 2::2] - temp_h_b[1::2, 2::2] / 2.
+
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp = np.vstack((data, data[-1]))
+         value_S[1::2, 1::2] = temp[2::2, 1::2] - temp_v_b[2::2, 1::2] / 2.
+
+         value_W[1::2, 1::2] = data[1::2, ::2] + h[1::2, ::2] / 2.
+
+         #== CALCULATE DIRECTIONAL ESTIMATES IN R LOCATIONS
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp = np.delete(np.vstack((data[1], data)), -1, 0)
+         value_N[::2, ::2] = temp[::2, ::2] + temp_v_r[::2, ::2] / 2.
+
+         value_E[::2, ::2] = data[::2, 1::2] - h[::2, 1::2] / 2.
+
+         value_S[::2, ::2] = data[1::2, ::2] - v[1::2, ::2] / 2.
+
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp = np.delete(np.hstack((np.atleast_2d(data[:, 1]).T, data)), -1, 1)
+         value_W[::2, ::2] = temp[::2, ::2] + temp_h_r[::2, ::2] / 2.
+
+         output = np.zeros(np.shape(data), dtype=np.float32)
+         output = np.divide((np.multiply(value_N, weight_N) + \
+             np.multiply(value_E, weight_E) + \
+             np.multiply(value_S, weight_S) + \
+             np.multiply(value_W, weight_W)), \
+             (weight_N + weight_E + weight_S + weight_W))
+
+         output[::2, 1::2] = data[::2, 1::2]
+         output[1::2, ::2] = data[1::2, ::2]
+
+         return output
+
+     elif ((bayer_pattern == "gbrg") or (bayer_pattern == "grbg")):
+
+         # note that in the following the locations in the comments are given
+         # assuming the bayer_pattern gbrg
+
+         #== CALCULATE WEIGHTS IN B LOCATIONS
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp_v_b = np.delete(np.vstack((v[1], v)), -1, 0)
+         weight_N[::2, 1::2] = np.abs(v[::2, 1::2]) + np.abs(temp_v_b[::2, 1::2])
+
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp_h_b = np.hstack((h, np.atleast_2d(h[:, -2]).T))
+         weight_E[::2, 1::2] = np.abs(h[::2, 1::2]) + np.abs(temp_h_b[::2, 2::2])
+
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         weight_S[::2, 1::2] = np.abs(v[::2, 1::2]) + np.abs(v[1::2, 1::2])
+         weight_W[::2, 1::2] = np.abs(h[::2, 1::2]) + np.abs(h[::2, ::2])
+
+         #== CALCULATE WEIGHTS IN R LOCATIONS
+         weight_N[1::2, ::2] = np.abs(v[1::2, ::2]) + np.abs(v[::2, ::2])
+         weight_E[1::2, ::2] = np.abs(h[1::2, ::2]) + np.abs(h[1::2, 1::2])
+
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp_v_r = np.vstack((v, v[-1]))
+         weight_S[1::2, ::2] = np.abs(v[1::2, ::2]) + np.abs(temp_v_r[2::2, ::2])
+
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp_h_r = np.delete(np.hstack((np.atleast_2d(h[:, 1]).T, h)), -1, 1)
+         weight_W[1::2, ::2] = np.abs(h[1::2, ::2]) + np.abs(temp_h_r[1::2, ::2])
+
+         weight_N = np.divide(1., 1. + weight_N)
+         weight_E = np.divide(1., 1. + weight_E)
+         weight_S = np.divide(1., 1. + weight_S)
+         weight_W = np.divide(1., 1. + weight_W)
+
+         #== CALCULATE DIRECTIONAL ESTIMATES IN B LOCATIONS
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp = np.delete(np.vstack((data[1], data)), -1, 0)
+         value_N[::2, 1::2] = temp[::2, 1::2] + temp_v_b[::2, 1::2] / 2.
+
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((data, np.atleast_2d(data[:, -2]).T))
+         value_E[::2, 1::2] = temp[::2, 2::2] - temp_h_b[::2, 2::2] / 2.
+
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         value_S[::2, 1::2] = data[1::2, 1::2] - v[1::2, 1::2] / 2.
+
+         value_W[::2, 1::2] = data[::2, ::2] + h[::2, ::2] / 2.
+
+         #== CALCULATE DIRECTIONAL ESTIMATES IN R LOCATIONS
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         value_N[1::2, ::2] = data[::2, ::2] + v[::2, ::2] / 2.
+         value_E[1::2, ::2] = data[1::2, 1::2] - h[1::2, 1::2] / 2.
+
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp = np.vstack((data, data[-1]))
+         value_S[1::2, ::2] = temp[2::2, ::2] - temp_v_r[2::2, ::2] / 2.
+
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp = np.delete(np.hstack((np.atleast_2d(data[:, 1]).T, data)), -1, 1)
+         value_W[1::2, ::2] = temp[1::2, ::2] + temp_h_r[1::2, ::2] / 2.
+
+         output = np.zeros(np.shape(data), dtype=np.float32)
+         output = np.divide((np.multiply(value_N, weight_N) + \
+             np.multiply(value_E, weight_E) + \
+             np.multiply(value_S, weight_S) + \
+             np.multiply(value_W, weight_W)), \
+             (weight_N + weight_E + weight_S + weight_W))
+
+         output[::2, ::2] = data[::2, ::2]
+         output[1::2, 1::2] = data[1::2, 1::2]
+
+         return output
+
+
+ def fill_br_locations(data, G, bayer_pattern):
+
+     # Fill up the B/R values interpolated at R/B locations
+     B = np.zeros(np.shape(data), dtype=np.float32)
+     R = np.zeros(np.shape(data), dtype=np.float32)
+
+     data = np.asarray(data)
+     G = np.asarray(G)
+     d1 = np.asarray(signal.convolve2d(data, [[-1, 0, 0], [0, 0, 0], [0, 0, 1]], mode="same", boundary="symm"))
+     d2 = np.asarray(signal.convolve2d(data, [[0, 0, 1], [0, 0, 0], [-1, 0, 0]], mode="same", boundary="symm"))
+
+     df_NE = np.asarray(signal.convolve2d(G, [[0, 0, 0], [0, 1, 0], [-1, 0, 0]], mode="same", boundary="symm"))
+     df_SE = np.asarray(signal.convolve2d(G, [[-1, 0, 0], [0, 1, 0], [0, 0, 0]], mode="same", boundary="symm"))
+     df_SW = np.asarray(signal.convolve2d(G, [[0, 0, -1], [0, 1, 0], [0, 0, 0]], mode="same", boundary="symm"))
+     df_NW = np.asarray(signal.convolve2d(G, [[0, 0, 0], [0, 1, 0], [0, 0, -1]], mode="same", boundary="symm"))
+
+     weight_NE = np.zeros(np.shape(data), dtype=np.float32)
+     weight_SE = np.zeros(np.shape(data), dtype=np.float32)
+     weight_SW = np.zeros(np.shape(data), dtype=np.float32)
+     weight_NW = np.zeros(np.shape(data), dtype=np.float32)
+
+     value_NE = np.zeros(np.shape(data), dtype=np.float32)
+     value_SE = np.zeros(np.shape(data), dtype=np.float32)
+     value_SW = np.zeros(np.shape(data), dtype=np.float32)
+     value_NW = np.zeros(np.shape(data), dtype=np.float32)
+
+     if ((bayer_pattern == "rggb") or (bayer_pattern == "bggr")):
+
+         #== weights for B in R locations
+         weight_NE[::2, ::2] = np.abs(d2[::2, ::2]) + np.abs(df_NE[::2, ::2])
+         weight_SE[::2, ::2] = np.abs(d1[::2, ::2]) + np.abs(df_SE[::2, ::2])
+         weight_SW[::2, ::2] = np.abs(d2[::2, ::2]) + np.abs(df_SW[::2, ::2])
+         weight_NW[::2, ::2] = np.abs(d1[::2, ::2]) + np.abs(df_NW[::2, ::2])
+
+         #== weights for R in B locations
+         weight_NE[1::2, 1::2] = np.abs(d2[1::2, 1::2]) + np.abs(df_NE[1::2, 1::2])
+         weight_SE[1::2, 1::2] = np.abs(d1[1::2, 1::2]) + np.abs(df_SE[1::2, 1::2])
+         weight_SW[1::2, 1::2] = np.abs(d2[1::2, 1::2]) + np.abs(df_SW[1::2, 1::2])
+         weight_NW[1::2, 1::2] = np.abs(d1[1::2, 1::2]) + np.abs(df_NW[1::2, 1::2])
+
+         weight_NE = np.divide(1., 1. + weight_NE)
+         weight_SE = np.divide(1., 1. + weight_SE)
+         weight_SW = np.divide(1., 1. + weight_SW)
+         weight_NW = np.divide(1., 1. + weight_NW)
+
+         #== directional estimates of B in R locations
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp = np.delete(np.vstack((data[1], data)), -1, 0)
+         value_NE[::2, ::2] = temp[::2, 1::2] + df_NE[::2, ::2] / 2.
+         value_SE[::2, ::2] = data[1::2, 1::2] + df_SE[::2, ::2] / 2.
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp = np.delete(np.hstack((np.atleast_2d(data[:, 1]).T, data)), -1, 1)
+         value_SW[::2, ::2] = temp[1::2, ::2] + df_SW[::2, ::2] / 2.
+
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp = np.delete(np.vstack((data[1], data)), -1, 0)
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp = np.delete(np.hstack((np.atleast_2d(temp[:, 1]).T, temp)), -1, 1)
+         value_NW[::2, ::2] = temp[::2, ::2] + df_NW[::2, ::2] / 2.
+
+         #== directional estimates of R in B locations
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((data, np.atleast_2d(data[:, -2]).T))
+         value_NE[1::2, 1::2] = temp[::2, 2::2] + df_NE[1::2, 1::2] / 2.
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((data, np.atleast_2d(data[:, -2]).T))
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp = np.vstack((temp, temp[-1]))
+         value_SE[1::2, 1::2] = temp[2::2, 2::2] + df_SE[1::2, 1::2] / 2.
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp = np.vstack((data, data[-1]))
+         value_SW[1::2, 1::2] = temp[2::2, ::2] + df_SW[1::2, 1::2] / 2.
+         value_NW[1::2, 1::2] = data[::2, ::2] + df_NW[1::2, 1::2] / 2.
+
+         RB = np.divide(np.multiply(weight_NE, value_NE) + \
+             np.multiply(weight_SE, value_SE) + \
+             np.multiply(weight_SW, value_SW) + \
+             np.multiply(weight_NW, value_NW), \
+             (weight_NE + weight_SE + weight_SW + weight_NW))
+
+         if (bayer_pattern == "rggb"):
+
+             R[1::2, 1::2] = RB[1::2, 1::2]
+             R[::2, ::2] = data[::2, ::2]
+             B[::2, ::2] = RB[::2, ::2]
+             B[1::2, 1::2] = data[1::2, 1::2]
+
+         elif (bayer_pattern == "bggr"):
+             R[::2, ::2] = RB[::2, ::2]
+             R[1::2, 1::2] = data[1::2, 1::2]
+             B[1::2, 1::2] = RB[1::2, 1::2]
+             B[::2, ::2] = data[::2, ::2]
+
+
+         R[1::2, ::2] = G[1::2, ::2]
+         R[::2, 1::2] = G[::2, 1::2]
+         R = fill_channel_directional_weight(R, "gbrg")
+
+         B[1::2, ::2] = G[1::2, ::2]
+         B[::2, 1::2] = G[::2, 1::2]
+         B = fill_channel_directional_weight(B, "gbrg")
+
+
+     elif ((bayer_pattern == "grbg") or (bayer_pattern == "gbrg")):
+         #== weights for B in R locations
+         weight_NE[::2, 1::2] = np.abs(d2[::2, 1::2]) + np.abs(df_NE[::2, 1::2])
+         weight_SE[::2, 1::2] = np.abs(d1[::2, 1::2]) + np.abs(df_SE[::2, 1::2])
+         weight_SW[::2, 1::2] = np.abs(d2[::2, 1::2]) + np.abs(df_SW[::2, 1::2])
+         weight_NW[::2, 1::2] = np.abs(d1[::2, 1::2]) + np.abs(df_NW[::2, 1::2])
+
+         #== weights for R in B locations
+         weight_NE[1::2, ::2] = np.abs(d2[1::2, ::2]) + np.abs(df_NE[1::2, ::2])
+         weight_SE[1::2, ::2] = np.abs(d1[1::2, ::2]) + np.abs(df_SE[1::2, ::2])
+         weight_SW[1::2, ::2] = np.abs(d2[1::2, ::2]) + np.abs(df_SW[1::2, ::2])
+         weight_NW[1::2, ::2] = np.abs(d1[1::2, ::2]) + np.abs(df_NW[1::2, ::2])
+
+         weight_NE = np.divide(1., 1. + weight_NE)
+         weight_SE = np.divide(1., 1. + weight_SE)
+         weight_SW = np.divide(1., 1. + weight_SW)
+         weight_NW = np.divide(1., 1. + weight_NW)
+
+         #== directional estimates of B in R locations
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp = np.delete(np.vstack((data[1], data)), -1, 0)
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((temp, np.atleast_2d(temp[:, -2]).T))
+         value_NE[::2, 1::2] = temp[::2, 2::2] + df_NE[::2, 1::2] / 2.
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((data, np.atleast_2d(data[:, -2]).T))
+         value_SE[::2, 1::2] = temp[1::2, 2::2] + df_SE[::2, 1::2] / 2.
+         value_SW[::2, 1::2] = data[1::2, ::2] + df_SW[::2, 1::2] / 2.
+
+         # repeating the second row at the top of matrix so that sampling does
+         # not cause any dimension mismatch, also remove the bottom row
+         temp = np.delete(np.vstack((data[1], data)), -1, 0)
+         value_NW[::2, 1::2] = temp[::2, ::2] + df_NW[::2, 1::2] / 2.
+
+         #== directional estimates of R in B locations
+         value_NE[1::2, ::2] = data[::2, 1::2] + df_NE[1::2, ::2] / 2.
+         # repeating the column before the last to the right so that sampling
+         # does not cause any dimension mismatch
+         temp = np.hstack((data, np.atleast_2d(data[:, -2]).T))
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp = np.vstack((temp, temp[-1]))
+         value_SE[1::2, ::2] = temp[2::2, 1::2] + df_SE[1::2, ::2] / 2.
+         # repeating the row before the last row to the bottom so that sampling
+         # does not cause any dimension mismatch
+         temp = np.vstack((data, data[-1]))
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp = np.delete(np.hstack((np.atleast_2d(temp[:, 1]).T, temp)), -1, 1)
+         value_SW[1::2, ::2] = temp[2::2, ::2] + df_SW[1::2, ::2] / 2.
+         # repeating the second column at the left of matrix so that sampling
+         # does not cause any dimension mismatch, also remove the rightmost
+         # column
+         temp = np.delete(np.hstack((np.atleast_2d(data[:, 1]).T, data)), -1, 1)
+         value_NW[1::2, ::2] = temp[::2, ::2] + df_NW[1::2, ::2] / 2.
+
+         RB = np.divide(np.multiply(weight_NE, value_NE) + \
+             np.multiply(weight_SE, value_SE) + \
+             np.multiply(weight_SW, value_SW) + \
+             np.multiply(weight_NW, value_NW), \
+             (weight_NE + weight_SE + weight_SW + weight_NW))
+
+         if (bayer_pattern == "grbg"):
+
+             R[1::2, ::2] = RB[1::2, ::2]
+             R[::2, 1::2] = data[::2, 1::2]
+             B[::2, 1::2] = RB[::2, 1::2]
+             B[1::2, ::2] = data[1::2, ::2]
+
+         elif (bayer_pattern == "gbrg"):
+             R[::2, 1::2] = RB[::2, 1::2]
+             R[1::2, ::2] = data[1::2, ::2]
+             B[1::2, ::2] = RB[1::2, ::2]
+             B[::2, 1::2] = data[::2, 1::2]
+
+
+         R[::2, ::2] = G[::2, ::2]
+         R[1::2, 1::2] = G[1::2, 1::2]
+         R = fill_channel_directional_weight(R, "rggb")
+
+         B[1::2, 1::2] = G[1::2, 1::2]
+         B[::2, ::2] = G[::2, ::2]
+         B = fill_channel_directional_weight(B, "rggb")
+
+
+     return B, R
+
+ # # =============================================================
+ # # function: debayer_mhc_fast
+ # # demosaicing using Malvar-He-Cutler algorithm
+ # # http://www.ipol.im/pub/art/2011/g_mhcd/
+ # # =============================================================
+ # def debayer_mhc_fast(raw, bayer_pattern="rggb", clip_range=[0, 65535], timeshow=False):
+ #
+ #     # convert to float32 in case it was not
+ #     raw = np.float32(raw)
+ #
+ #     # dimensions
+ #     width, height = utility.helpers(raw).get_width_height()
+ #
+ #     # allocate space for the R, G, B planes
+ #     R = np.empty((height, width), dtype = np.float32)
+ #     G = np.empty((height, width), dtype = np.float32)
+ #     B = np.empty((height, width), dtype = np.float32)
+ #
+ #     # create a RGB output
+ #     demosaic_out = np.empty( (height, width, 3), dtype = np.float32 )
+ #
+ #     # define the convolution kernels
+ #     kernel_g_at_rb = [[0., 0., -1., 0., 0.],\
+ #                       [0., 0., 2., 0., 0.],\
+ #                       [-1., 2., 4., 2., -1.],\
+ #                       [0., 0., 2., 0., 0.],\
+ #                       [0., 0., -1., 0., 0.]] * .125
+ #
+ #     kernel_r_at_gr = [[0., 0., .5, 0., 0.],\
+ #                       [0., -1., 0., -1., 0.],\
+ #                       [-1., 4., 5., 4., -1.],\
+ #                       [0., -1., 0., -1., 0.],\
+ #                       [0., 0., .5, 0., 0.]] * .125
+ #
+ #     kernel_b_at_gr = [[0., 0., -1., 0., 0.],\
+ #                       [0., -1., 4., -1., 0.],\
+ #                       [.5, 0., 5., 0., .5],\
+ #                       [0., -1., 4., -1., 0],\
+ #                       [0., 0., -1., 0., 0.]] * .125
+ #
+ #     kernel_r_at_gb = [[0., 0., -1., 0., 0.],\
+ #                       [0., -1., 4., -1., 0.],\
+ #                       [.5, 0., 5., 0., .5],\
+ #                       [0., -1., 4., -1., 0.],\
+ #                       [0., 0., -1., 0., 0.]] * .125
+ #
+ #     kernel_b_at_gb = [[0., 0., .5, 0., 0.],\
+ #                       [0., -1., 0., -1., 0.],\
+ #                       [-1., 4., 5., 4., -1.],\
+ #                       [0., -1., 0., -1., 0.],\
+ #                       [0., 0., .5, 0., 0.]] * .125
+ #
+ #     kernel_r_at_b = [[0., 0., -1.5, 0., 0.],\
+ #                      [0., 2., 0., 2., 0.],\
+ #                      [-1.5, 0., 6., 0., -1.5],\
+ #                      [0., 2., 0., 2., 0.],\
+ #                      [0., 0., -1.5, 0., 0.]] * .125
+ #
+ #     kernel_b_at_r = [[0., 0., -1.5, 0., 0.],\
+ #                      [0., 2., 0., 2., 0.],\
+ #                      [-1.5, 0., 6., 0., -1.5],\
+ #                      [0., 2., 0., 2., 0.],\
+ #                      [0., 0., -1.5, 0., 0.]] * .125
+ #
+ #
+ #
+ #     # fill up the directly available values according to the Bayer pattern
+ #     if (bayer_pattern == "rggb"):
+ #
+ #         G[::2, 1::2] = raw[::2, 1::2]
+ #         G[1::2, ::2] = raw[1::2, ::2]
+ #         R[::2, ::2] = raw[::2, ::2]
+ #         B[1::2, 1::2] = raw[1::2, 1::2]
+ #
+ #         # Green channel
+ #         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+ #
+ #             # to display progress
+ #             t0 = time.process_time()
+ #
+ #             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+ #
+ #                 # G at Red location
+ #                 if (((i % 2) == 0) and ((j % 2) == 0)):
+ #                     G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
+ #                         2. * G[i-1, j], \
+ #                         -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i, j], 2. * G[i, j+1], -1. * R[i, j+2], \
+ #                         2. * G[i+1, j], \
+ #                         -1. * R[i+2, j]])
+ #                 # G at Blue location
+ #                 elif (((i % 2) != 0) and ((j % 2) != 0)):
+ #                     G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
+ #                         2. * G[i-1, j], \
+ #                         -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i, j], 2. * G[i, j+1], -1. * B[i, j+2], \
+ #                         2. * G[i+1, j], \
+ #                         -1. * B[i+2, j]])
+ #             if (timeshow):
+ #                 elapsed_time = time.process_time() - t0
+ #                 print("Green: row index: " + str(i-1) + " of " + str(height) + \
+ #                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+ #
+ #         # Red and Blue channel
+ #         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+ #
+ #             # to display progress
+ #             t0 = time.process_time()
+ #
+ #             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+ #
+ #                 # Green locations in Red rows
+ #                 if (((i % 2) == 0) and ((j % 2) != 0)):
+ #                     # R at Green locations in Red rows
+ #                     R[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+ #                         -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i, j], 4. * R[i, j+1], -1. * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+ #                         .5 * G[i+2, j]])
+ #
+ #                     # B at Green locations in Red rows
+ #                     B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
+ #                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], 4. * B[i+1, j], -1. * G[i+1, j+1], \
+ #                         -1. * G[i+2, j]])
+ #
+ #                 # Green locations in Blue rows
+ #                 elif (((i % 2) != 0) and ((j % 2) == 0)):
+ #
+ #                     # R at Green locations in Blue rows
+ #                     R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
+ #                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
+ #                         -1. * G[i+2, j]])
+ #
+ #                     # B at Green locations in Blue rows
+ #                     B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+ #                         -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i, j], 4. * B[i, j+1], -1. * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+ #                         .5 * G[i+2, j]])
+ #
+ #                 # R at Blue locations
+ #                 elif (((i % 2) != 0) and ((j % 2) != 0)):
+ #                     R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
+ #                         2. * R[i-1, j-1], 2. * R[i-1, j+1], \
+ #                         -1.5 * B[i, j-2], 6. * B[i, j], -1.5 * B[i, j+2], \
+ #                         2. * R[i+1, j-1], 2. * R[i+1, j+1], \
+ #                         -1.5 * B[i+2, j]])
+ #
+ #                 # B at Red locations
+ #                 elif (((i % 2) == 0) and ((j % 2) == 0)):
+ #                     B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
+ #                         2. * B[i-1, j-1], 2. * B[i-1, j+1], \
+ #                         -1.5 * R[i, j-2], 6. * R[i, j], -1.5 * R[i, j+2], \
+ #                         2. * B[i+1, j-1], 2. * B[i+1, j+1], \
+ #                         -1.5 * R[i+2, j]])
+ #
+ #             if (timeshow):
+ #                 elapsed_time = time.process_time() - t0
+ #                 print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
+ #                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+ #
+ #
+ #     elif (bayer_pattern == "gbrg"):
+ #
+ #         G[::2, ::2] = raw[::2, ::2]
+ #         G[1::2, 1::2] = raw[1::2, 1::2]
+ #         R[1::2, ::2] = raw[1::2, ::2]
+ #         B[::2, 1::2] = raw[::2, 1::2]
+ #
+ #         # Green channel
+ #         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+ #
+ #             # to display progress
+ #             t0 = time.process_time()
+ #
+ #             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+ #
+ #                 # G at Red location
+ #                 if (((i % 2) != 0) and ((j % 2) == 0)):
+ #                     G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
+ #                         2. * G[i-1, j], \
+ #                         -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i, j], 2. * G[i, j+1], -1. * R[i, j+2], \
+ #                         2. * G[i+1, j], \
+ #                         -1. * R[i+2, j]])
+ #                 # G at Blue location
+ #                 elif (((i % 2) == 0) and ((j % 2) != 0)):
+ #                     G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
+ #                         2. * G[i-1, j], \
+ #                         -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i, j], 2. * G[i, j+1], -1. * B[i, j+2], \
+ #                         2. * G[i+1, j], \
+ #                         -1. * B[i+2, j]])
+ #             if (timeshow):
+ #                 elapsed_time = time.process_time() - t0
+ #                 print("Green: row index: " + str(i-1) + " of " + str(height) + \
+ #                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+ #
+ #         # Red and Blue channel
+ #         for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+ #
+ #             # to display progress
+ #             t0 = time.process_time()
+ #
+ #             for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+ #
+ #                 # Green locations in Red rows
+ #                 if (((i % 2) != 0) and ((j % 2) != 0)):
+ #                     # R at Green locations in Red rows
+ #                     R[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+ #                         -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i, j], 4. * R[i, j+1], -1. * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+ #                         .5 * G[i+2, j]])
+ #
+ #                     # B at Green locations in Red rows
+ #                     B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
+ #                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], 4. * B[i+1, j], -1. * G[i+1, j+1], \
+ #                         -1. * G[i+2, j]])
+ #
+ #                 # Green locations in Blue rows
+ #                 elif (((i % 2) == 0) and ((j % 2) == 0)):
+ #
+ #                     # R at Green locations in Blue rows
+ #                     R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
+ #                         .5 * G[i, j-2], 5. * G[i, j], .5 * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
+ #                         -1. * G[i+2, j]])
+ #
+ #                     # B at Green locations in Blue rows
+ #                     B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
+ #                         -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
+ #                         -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i, j], 4. * B[i, j+1], -1. * G[i, j+2], \
+ #                         -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
+ #                         .5 * G[i+2, j]])
+ #
+ #                 # R at Blue locations
+ #                 elif (((i % 2) == 0) and ((j % 2) != 0)):
+ #                     R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
+ #                         2. * R[i-1, j-1], 2. * R[i-1, j+1], \
+ #                         -1.5 * B[i, j-2], 6. * B[i, j], -1.5 * B[i, j+2], \
+ #                         2. * R[i+1, j-1], 2. * R[i+1, j+1], \
+ #                         -1.5 * B[i+2, j]])
+ #
+ #                 # B at Red locations
+ #                 elif (((i % 2) != 0) and ((j % 2) == 0)):
+ #                     B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
+ #                         2. * B[i-1, j-1], 2. * B[i-1, j+1], \
+ #                         -1.5 * R[i, j-2], 6. * R[i, j], -1.5 * R[i, j+2], \
+ #                         2. * B[i+1, j-1], 2. * B[i+1, j+1], \
+ #                         -1.5 * R[i+2, j]])
+ #
+ #             if (timeshow):
+ #                 elapsed_time = time.process_time() - t0
+ #                 print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
+ #                     " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
+ #
+ #     elif (bayer_pattern == "grbg"):
1091
+ #
1092
+ # G[::2, ::2] = raw[::2, ::2]
1093
+ # G[1::2, 1::2] = raw[1::2, 1::2]
1094
+ # R[::2, 1::2] = raw[::2, 1::2]
1095
+ # B[1::2, ::2] = raw[1::2, ::2]
1096
+ #
1097
+ # # Green channel
1098
+ # for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
1099
+ #
1100
+ # # to display progress
1101
+ # t0 = time.process_time()
1102
+ #
1103
+ # for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
1104
+ #
1105
+ # # G at Red location
1106
+ # if (((i % 2) == 0) and ((j % 2) != 0)):
1107
+ # G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
1108
+ # 2. * G[i-1, j], \
1109
+ # -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i,j], 2. * G[i, j+1], -1. * R[i, j+2],\
1110
+ # 2. * G[i+1, j], \
1111
+ # -1. * R[i+2, j]])
1112
+ # # G at Blue location
1113
+ # elif (((i % 2) != 0) and ((j % 2) == 0)):
1114
+ # G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
1115
+ # 2. * G[i-1, j], \
1116
+ # -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i,j], 2. * G[i, j+1], -1. * B[i, j+2], \
1117
+ # 2. * G[i+1, j],\
1118
+ # -1. * B[i+2, j]])
1119
+ # if (timeshow):
1120
+ # elapsed_time = time.process_time() - t0
1121
+ # print("Green: row index: " + str(i-1) + " of " + str(height) + \
1122
+ # " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
1123
+ #
1124
+ # # Red and Blue channel
1125
+ # for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
1126
+ #
1127
+ # # to display progress
1128
+ # t0 = time.process_time()
1129
+ #
1130
+ # for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
1131
+ #
1132
+ # # Green locations in Red rows
1133
+ # if (((i % 2) == 0) and ((j % 2) == 0)):
1134
+ # # R at Green locations in Red rows
1135
+ # R[i, j] = 0.125 * np.sum([.5 * G[i-2, j],\
1136
+ # -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
1137
+ # -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i,j], 4. * R[i, j+1], -1. * G[i, j+2], \
1138
+ # -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
1139
+ # .5 * G[i+2, j]])
1140
+ #
1141
+ # # B at Green locations in Red rows
1142
+ # B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
1143
+ # -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
1144
+ # .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
1145
+ # -1. * G[i+1, j-1], 4. * B[i+1,j], -1. * G[i+1, j+1], \
1146
+ # -1. * G[i+2, j]])
1147
+ #
1148
+ # # Green locations in Blue rows
1149
+ # elif (((i % 2) != 0) and ((j % 2) != 0)):
1150
+ #
1151
+ # # R at Green locations in Blue rows
1152
+ # R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
1153
+ # -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
1154
+ # .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
1155
+ # -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
1156
+ # -1. * G[i+2, j]])
1157
+ #
1158
+ # # B at Green locations in Blue rows
1159
+ # B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
1160
+ # -1. * G [i-1, j-1], -1. * G[i-1, j+1], \
1161
+ # -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i,j], 4. * B[i, j+1], -1. * G[i, j+2], \
1162
+ # -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
1163
+ # .5 * G[i+2, j]])
1164
+ #
1165
+ # # R at Blue locations
1166
+ # elif (((i % 2) != 0) and ((j % 2) == 0)):
1167
+ # R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
1168
+ # 2. * R[i-1, j-1], 2. * R[i-1, j+1], \
1169
+ # -1.5 * B[i, j-2], 6. * B[i,j], -1.5 * B[i, j+2], \
1170
+ # 2. * R[i+1, j-1], 2. * R[i+1, j+1], \
1171
+ # -1.5 * B[i+2, j]])
1172
+ #
1173
+ # # B at Red locations
1174
+ # elif (((i % 2) == 0) and ((j % 2) != 0)):
1175
+ # B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
1176
+ # 2. * B[i-1, j-1], 2. * B[i-1, j+1], \
1177
+ # -1.5 * R[i, j-2], 6. * R[i,j], -1.5 * R[i, j+2], \
1178
+ # 2. * B[i+1, j-1], 2. * B[i+1, j+1], \
1179
+ # -1.5 * R[i+2, j]])
1180
+ #
1181
+ # if (timeshow):
1182
+ # elapsed_time = time.process_time() - t0
1183
+ # print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
1184
+ # " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
1185
+ #
1186
+ # elif (bayer_pattern == "bggr"):
1187
+ #
1188
+ # G[::2, 1::2] = raw[::2, 1::2]
1189
+ # G[1::2, ::2] = raw[1::2, ::2]
1190
+ # R[1::2, 1::2] = raw[1::2, 1::2]
1191
+ # B[::2, ::2] = raw[::2, ::2]
1192
+ #
1193
+ # # Green channel
1194
+ # for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
1195
+ #
1196
+ # # to display progress
1197
+ # t0 = time.process_time()
1198
+ #
1199
+ # for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
1200
+ #
1201
+ # # G at Red location
1202
+ # if (((i % 2) != 0) and ((j % 2) != 0)):
1203
+ # G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
1204
+ # 2. * G[i-1, j], \
1205
+ # -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i,j], 2. * G[i, j+1], -1. * R[i, j+2],\
1206
+ # 2. * G[i+1, j], \
1207
+ # -1. * R[i+2, j]])
1208
+ # # G at Blue location
1209
+ # elif (((i % 2) == 0) and ((j % 2) == 0)):
1210
+ # G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
1211
+ # 2. * G[i-1, j], \
1212
+ # -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i,j], 2. * G[i, j+1], -1. * B[i, j+2], \
1213
+ # 2. * G[i+1, j],\
1214
+ # -1. * B[i+2, j]])
1215
+ # if (timeshow):
1216
+ # elapsed_time = time.process_time() - t0
1217
+ # print("Green: row index: " + str(i-1) + " of " + str(height) + \
1218
+ # " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
1219
+ #
1220
+ # # Red and Blue channel
1221
+ # for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
1222
+ #
1223
+ # # to display progress
1224
+ # t0 = time.process_time()
1225
+ #
1226
+ # for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
1227
+ #
1228
+ # # Green locations in Red rows
1229
+ # if (((i % 2) != 0) and ((j % 2) == 0)):
1230
+ # # R at Green locations in Red rows
1231
+ # R[i, j] = 0.125 * np.sum([.5 * G[i-2, j],\
1232
+ # -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
1233
+ # -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i,j], 4. * R[i, j+1], -1. * G[i, j+2], \
1234
+ # -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
1235
+ # .5 * G[i+2, j]])
1236
+ #
1237
+ # # B at Green locations in Red rows
1238
+ # B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
1239
+ # -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
1240
+ # .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
1241
+ # -1. * G[i+1, j-1], 4. * B[i+1,j], -1. * G[i+1, j+1], \
1242
+ # -1. * G[i+2, j]])
1243
+ #
1244
+ # # Green locations in Blue rows
1245
+ # elif (((i % 2) == 0) and ((j % 2) != 0)):
1246
+ #
1247
+ # # R at Green locations in Blue rows
1248
+ # R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
1249
+ # -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
1250
+ # .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
1251
+ # -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
1252
+ # -1. * G[i+2, j]])
1253
+ #
1254
+ # # B at Green locations in Blue rows
1255
+ # B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
1256
+ # -1. * G [i-1, j-1], -1. * G[i-1, j+1], \
1257
+ # -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i,j], 4. * B[i, j+1], -1. * G[i, j+2], \
1258
+ # -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
1259
+ # .5 * G[i+2, j]])
1260
+ #
1261
+ # # R at Blue locations
1262
+ # elif (((i % 2) == 0) and ((j % 2) == 0)):
1263
+ # R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
1264
+ # 2. * R[i-1, j-1], 2. * R[i-1, j+1], \
1265
+ # -1.5 * B[i, j-2], 6. * B[i,j], -1.5 * B[i, j+2], \
1266
+ # 2. * R[i+1, j-1], 2. * R[i+1, j+1], \
1267
+ # -1.5 * B[i+2, j]])
1268
+ #
1269
+ # # B at Red locations
1270
+ # elif (((i % 2) != 0) and ((j % 2) != 0)):
1271
+ # B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
1272
+ # 2. * B[i-1, j-1], 2. * B[i-1, j+1], \
1273
+ # -1.5 * R[i, j-2], 6. * R[i,j], -1.5 * R[i, j+2], \
1274
+ # 2. * B[i+1, j-1], 2. * B[i+1, j+1], \
1275
+ # -1.5 * R[i+2, j]])
1276
+ #
1277
+ # if (timeshow):
1278
+ # elapsed_time = time.process_time() - t0
1279
+ # print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
1280
+ # " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
1281
+ #
1282
+ # else:
1283
+ # print("Invalid bayer pattern. Valid pattern can be rggb, gbrg, grbg, bggr")
1284
+ # return demosaic_out # This will be all zeros
1285
+ #
1286
+ # # Fill up the RGB output with interpolated values
1287
+ # demosaic_out[0:height, 0:width, 0] = R[no_of_pixel_pad : height + no_of_pixel_pad, \
1288
+ # no_of_pixel_pad : width + no_of_pixel_pad]
1289
+ # demosaic_out[0:height, 0:width, 1] = G[no_of_pixel_pad : height + no_of_pixel_pad, \
1290
+ # no_of_pixel_pad : width + no_of_pixel_pad]
1291
+ # demosaic_out[0:height, 0:width, 2] = B[no_of_pixel_pad : height + no_of_pixel_pad, \
1292
+ # no_of_pixel_pad : width + no_of_pixel_pad]
1293
+ #
1294
+ # demosaic_out = np.clip(demosaic_out, clip_range[0], clip_range[1])
1295
+ # return demosaic_out
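The commented-out reference above evaluates the Malvar-He-Cutler taps pixel by pixel; the same filters can be applied image-wide with a convolution. A minimal sketch for the G-at-R/B kernel only, assuming an `rggb` mosaic already normalized to the working range:

```python
import numpy as np
from scipy import signal

def mhc_green(raw):
    # Same taps as the commented loops above: the -1/2/4 cross pattern, divided by 8.
    k_g = np.array([[ 0,  0, -1,  0,  0],
                    [ 0,  0,  2,  0,  0],
                    [-1,  2,  4,  2, -1],
                    [ 0,  0,  2,  0,  0],
                    [ 0,  0, -1,  0,  0]], dtype=np.float32) / 8.
    g_est = signal.convolve2d(raw, k_g, mode="same", boundary="symm")
    green = np.float32(raw).copy()
    green[::2, ::2] = g_est[::2, ::2]      # interpolated G at R sites (rggb)
    green[1::2, 1::2] = g_est[1::2, 1::2]  # interpolated G at B sites
    return green
```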
IIR-Lab/ISP_pipeline/docker_guidelines.md ADDED
@@ -0,0 +1,29 @@
+ # Final submission docker guidelines
+
+ All final submissions should be packaged as a docker image, built from the dockerfile in the root of the repository with the following command:
+
+ ```bash
+ docker build -t <your_team_name> .
+ ```
+
+ The docker image should be run with the following command:
+
+ ```bash
+ docker run -it --rm -v $(pwd)/data:/data <your_team_name> ./run.sh
+ ```
+
+ As output, the docker image should produce images in `JPEG` format in the `/data` directory, named after the RAW input files in `/data` but with the `.jpg` extension. Make sure that your code does not create any other folders in the `/data` directory. The docker image should contain all the necessary dependencies to run the code, and it should include the `run.sh` script as the entrypoint. Take into account that inside the docker image, the `/data` directory will be mounted to the `$(pwd)/data` directory of the host machine, so the image must read the input files from `/data` and write the output files back to `/data`.
+
+ ## Example
+
+ We provide an example of a docker image that can be used as a reference. It can be found in our [github repository](https://github.com/createcolor/nightimaging23).
+
+ Your dockerfile may look like this:
+
+ ```dockerfile
+ FROM tensorflow/tensorflow:2.3.0
+ WORKDIR /opt/app
+ COPY . .
+ RUN pip install -r requirements.txt
+ CMD ["./run.sh"]
+ ```
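A minimal sketch of the expected I/O contract described above; `process_raw` is a hypothetical stand-in for your pipeline entry point, Pillow is assumed for JPEG writing, and the glob pattern should be adjusted to the actual RAW extension:

```python
from pathlib import Path
import numpy as np
from PIL import Image

def run(data_dir="/data"):
    for raw_path in sorted(Path(data_dir).glob("*.png")):    # adjust pattern to the RAW inputs
        rgb = process_raw(raw_path)                          # hypothetical pipeline entry point
        out_path = Path(data_dir) / (raw_path.stem + ".jpg") # same filename, .jpg extension
        Image.fromarray(np.uint8(rgb)).save(out_path, "JPEG")
```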
IIR-Lab/ISP_pipeline/imaging.py ADDED
@@ -0,0 +1,1293 @@
+ # Note:
+ # The functions try to operate in float32 data precision
+
+ # =============================================================
+ # Import the libraries
+ # =============================================================
+ import numpy as np # array operations
+ import math # basic math operations
+ from matplotlib import pylab as plt
+ import time # measure runtime
+ import utility
+ import debayer
+ import sys # float precision
+ from scipy import signal # convolutions
+ from scipy import interpolate # for interpolation
+
+
+
+ # =============================================================
+ # class: ImageInfo
+ # Helps set up necessary information/metadata of the image
+ # =============================================================
+ class ImageInfo:
+ def __init__(self, name = "unknown", data = -1, is_show = False):
+ self.name = name
+ self.data = data
+ self.size = np.shape(self.data)
+ self.is_show = is_show
+ self.color_space = "unknown"
+ self.bayer_pattern = "unknown"
+ self.channel_gain = (1.0, 1.0, 1.0, 1.0)
+ self.bit_depth = 0
+ self.black_level = (0, 0, 0, 0)
+ self.white_level = (1, 1, 1, 1)
+ self.color_matrix = [[1., .0, .0],\
+ [.0, 1., .0],\
+ [.0, .0, 1.]] # xyz2cam
+ self.min_value = np.min(self.data)
+ self.max_value = np.max(self.data)
+ self.data_type = self.data.dtype
+
+ # Display image only if is_show = True
+ if (self.is_show):
+ plt.imshow(self.data)
+ plt.show()
+
+ def set_data(self, data):
+ # This function updates data and corresponding fields
+ self.data = data
+ self.size = np.shape(self.data)
+ self.data_type = self.data.dtype
+ self.min_value = np.min(self.data)
+ self.max_value = np.max(self.data)
+
+ def get_size(self):
+ return self.size
+
+ def get_width(self):
+ return self.size[1]
+
+ def get_height(self):
+ return self.size[0]
+
+ def get_depth(self):
+ if np.ndim(self.data) > 2:
+ return self.size[2]
+ else:
+ return 0
+
+ def set_color_space(self, color_space):
+ self.color_space = color_space
+
+ def get_color_space(self):
+ return self.color_space
+
+ def set_channel_gain(self, channel_gain):
+ self.channel_gain = channel_gain
+
+ def get_channel_gain(self):
+ return self.channel_gain
+
+ def set_color_matrix(self, color_matrix):
+ self.color_matrix = color_matrix
+
+ def get_color_matrix(self):
+ return self.color_matrix
+
+ def set_bayer_pattern(self, bayer_pattern):
+ self.bayer_pattern = bayer_pattern
+
+ def get_bayer_pattern(self):
+ return self.bayer_pattern
+
+ def set_bit_depth(self, bit_depth):
+ self.bit_depth = bit_depth
+
+ def get_bit_depth(self):
+ return self.bit_depth
+
+ def set_black_level(self, black_level):
+ self.black_level = black_level
+
+ def get_black_level(self):
+ return self.black_level
+
+ def set_white_level(self, white_level):
+ self.white_level = white_level
+
+ def get_white_level(self):
+ return self.white_level
+
+ def get_min_value(self):
+ return self.min_value
+
+ def get_max_value(self):
+ return self.max_value
+
+ def get_data_type(self):
+ return self.data_type
+
+ def __str__(self):
+ return "Image " + self.name + " info:" + \
+ "\n\tname:\t" + self.name + \
+ "\n\tsize:\t" + str(self.size) + \
+ "\n\tcolor space:\t" + self.color_space + \
+ "\n\tbayer pattern:\t" + self.bayer_pattern + \
+ "\n\tchannel gains:\t" + str(self.channel_gain) + \
+ "\n\tbit depth:\t" + str(self.bit_depth) + \
+ "\n\tdata type:\t" + str(self.data_type) + \
+ "\n\tblack level:\t" + str(self.black_level) + \
+ "\n\tminimum value:\t" + str(self.min_value) + \
+ "\n\tmaximum value:\t" + str(self.max_value)
+
+
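A short usage sketch of the metadata container above (the values are illustrative):

```python
info = ImageInfo(name="night_frame", data=np.zeros((4, 6), dtype=np.uint16))
info.set_bayer_pattern("rggb")
info.set_black_level((256, 256, 256, 256))
print(info)  # __str__ prints the collected metadata
```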
+ # =============================================================
+ # function: black_level_correction
+ # subtracts the black level channel wise
+ # =============================================================
+ def black_level_correction(raw, black_level, white_level, clip_range):
+
+ print("----------------------------------------------------")
+ print("Running black level correction...")
+
+ # make float32 in case it was not
+ black_level = np.float32(black_level)
+ white_level = np.float32(white_level)
+ raw = np.float32(raw)
+
+ # create new data so that the original raw data does not change
+ data = np.zeros(raw.shape)
+
+ # bring data in range 0 to 1
+ data[::2, ::2] = (raw[::2, ::2] - black_level[0]) / (white_level[0] - black_level[0])
+ data[::2, 1::2] = (raw[::2, 1::2] - black_level[1]) / (white_level[1] - black_level[1])
+ data[1::2, ::2] = (raw[1::2, ::2] - black_level[2]) / (white_level[2] - black_level[2])
+ data[1::2, 1::2] = (raw[1::2, 1::2]- black_level[3]) / (white_level[3] - black_level[3])
+
+ # bring within the bit depth range
+ data = data * clip_range[1]
+
+ # clip within the range
+ data = np.clip(data, clip_range[0], clip_range[1])
+ data = np.float32(data)
+
+ return data
+
+
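For example, with a black level of 256, a white level of 4095, and `clip_range=[0, 65535]`, a raw value of 2175 maps to (2175 - 256) / (4095 - 256) * 65535, roughly 32759:

```python
raw = np.full((4, 4), 2175, dtype=np.uint16)  # synthetic flat frame
out = black_level_correction(raw, [256] * 4, [4095] * 4, [0, 65535])
print(out[0, 0])  # ~32759.0
```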
+ # =============================================================
+ # function: channel_gain_white_balance
+ # multiply with the white balance channel gains
+ # =============================================================
+ def channel_gain_white_balance(data, channel_gain):
+
+ print("----------------------------------------------------")
+ print("Running channel gain white balance...")
+
+ # convert into float32 in case they were not
+ data = np.float32(data)
+ channel_gain = np.float32(channel_gain)
+
+ # multiply with the channel gains
+ data[::2, ::2] = data[::2, ::2] * channel_gain[0]
+ data[::2, 1::2] = data[::2, 1::2] * channel_gain[1]
+ data[1::2, ::2] = data[1::2, ::2] * channel_gain[2]
+ data[1::2, 1::2] = data[1::2, 1::2] * channel_gain[3]
+
+ # clipping within range
+ data = np.clip(data, 0., None) # upper level not necessary
+
+ return data
+
+
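The four gains follow the Bayer quadrant order (top-left, top-right, bottom-left, bottom-right); continuing the example above for an `rggb` frame, boosting red and blue while leaving the greens untouched might look like this (the gain values are illustrative):

```python
balanced = channel_gain_white_balance(out, (1.9, 1.0, 1.0, 2.4))  # R, Gr, Gb, B gains
```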
+ # =============================================================
+ # function: bad_pixel_correction
+ # correct for the bad (dead, stuck, or hot) pixels
+ # =============================================================
+ def bad_pixel_correction(data, neighborhood_size):
+
+ print("----------------------------------------------------")
+ print("Running bad pixel correction...")
+
+ if ((neighborhood_size % 2) == 0):
+ print("neighborhood_size should be an odd number, recommended value 3")
+ return data
+
+ # convert to float32 in case it was not,
+ # keeping the data format consistently float32
+ data = np.float32(data)
+
+ # Separate out the quarter resolution images
+ D = {} # Empty dictionary
+ D[0] = data[::2, ::2]
+ D[1] = data[::2, 1::2]
+ D[2] = data[1::2, ::2]
+ D[3] = data[1::2, 1::2]
+
+ # number of pixels to be padded at the borders
+ no_of_pixel_pad = math.floor(neighborhood_size / 2.)
+
+ for idx in range(0, len(D)): # perform same operation for each quarter
+
+ # display progress
+ print("bad pixel correction: Quarter " + str(idx+1) + " of 4")
+
+ img = D[idx]
+ width, height = utility.helpers(img).get_width_height()
+
+ # pad pixels at the borders
+ img = np.pad(img, \
+ (no_of_pixel_pad, no_of_pixel_pad),\
+ 'reflect') # reflect would not repeat the border value
+
+ for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+ for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+ # save the middle pixel value
+ mid_pixel_val = img[i, j]
+
+ # extract the neighborhood
+ neighborhood = img[i - no_of_pixel_pad : i + no_of_pixel_pad+1,\
+ j - no_of_pixel_pad : j + no_of_pixel_pad+1]
+
+ # set the center pixel's value to the left pixel's value;
+ # it does not matter whether the left or the right pixel
+ # is used to replace the center pixel's value
+ neighborhood[no_of_pixel_pad, no_of_pixel_pad] = neighborhood[no_of_pixel_pad, no_of_pixel_pad-1]
+
+ min_neighborhood = np.min(neighborhood)
+ max_neighborhood = np.max(neighborhood)
+
+ if (mid_pixel_val < min_neighborhood):
+ img[i,j] = min_neighborhood
+ elif (mid_pixel_val > max_neighborhood):
+ img[i,j] = max_neighborhood
+ else:
+ img[i,j] = mid_pixel_val
+
+ # Put the corrected image to the dictionary
+ D[idx] = img[no_of_pixel_pad : height + no_of_pixel_pad,\
+ no_of_pixel_pad : width + no_of_pixel_pad]
+
+ # Regrouping the data
+ data[::2, ::2] = D[0]
+ data[::2, 1::2] = D[1]
+ data[1::2, ::2] = D[2]
+ data[1::2, 1::2] = D[3]
+
+ return data
+
+
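Because the loop ultimately clamps each sample to the min/max of its same-color neighbors (overwriting the center with its left neighbor first is just a way of excluding the center from the min/max), an equivalent vectorized form can use a footprint with the center switched off. A sketch for one quarter-resolution plane:

```python
from scipy import ndimage

def clamp_to_neighbors(plane, neighborhood_size=3):
    footprint = np.ones((neighborhood_size, neighborhood_size), dtype=bool)
    footprint[neighborhood_size // 2, neighborhood_size // 2] = False  # exclude center
    lo = ndimage.minimum_filter(plane, footprint=footprint, mode="reflect")
    hi = ndimage.maximum_filter(plane, footprint=footprint, mode="reflect")
    return np.clip(plane, lo, hi)  # same clamp as the loop above
```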
+ # =============================================================
+ # class: demosaic
+ # =============================================================
+ class demosaic:
+ def __init__(self, data, bayer_pattern="rggb", clip_range=[0, 65535], name="demosaic"):
+ self.data = np.float32(data)
+ self.bayer_pattern = bayer_pattern
+ self.clip_range = clip_range
+ self.name = name
+
+ def mhc(self, timeshow=False):
+
+ print("----------------------------------------------------")
+ print("Running demosaicing using Malvar-He-Cutler algorithm...")
+
+ return debayer.debayer_mhc(self.data, self.bayer_pattern, self.clip_range, timeshow)
+
+ def post_process_local_color_ratio(self, beta):
+ # Objective is to reduce high chroma jump
+ # Beta is the controlling parameter: higher gives more effect;
+ # however, too high a value does not make any further change
+
+ print("----------------------------------------------------")
+ print("Demosaicing post process using local color ratio...")
+
+ data = self.data
+
+ # add beta with the data to prevent divide by zero
+ data_beta = self.data + beta
+
+ # convolution kernels
+ # zeta1 averages the up, down, left, and right four values of a 3x3 window
+ zeta1 = np.multiply([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]], .25)
+ # zeta2 averages the four corner values of a 3x3 window
+ zeta2 = np.multiply([[1., 0., 1.], [0., 0., 0.], [1., 0., 1.]], .25)
+
+ # average of color ratio
+ g_over_b = signal.convolve2d(np.divide(data_beta[:, :, 1], data_beta[:, :, 2]), zeta1, mode="same", boundary="symm")
+ g_over_r = signal.convolve2d(np.divide(data_beta[:, :, 1], data_beta[:, :, 0]), zeta1, mode="same", boundary="symm")
+ b_over_g_zeta2 = signal.convolve2d(np.divide(data_beta[:, :, 2], data_beta[:, :, 1]), zeta2, mode="same", boundary="symm")
+ r_over_g_zeta2 = signal.convolve2d(np.divide(data_beta[:, :, 0], data_beta[:, :, 1]), zeta2, mode="same", boundary="symm")
+ b_over_g_zeta1 = signal.convolve2d(np.divide(data_beta[:, :, 2], data_beta[:, :, 1]), zeta1, mode="same", boundary="symm")
+ r_over_g_zeta1 = signal.convolve2d(np.divide(data_beta[:, :, 0], data_beta[:, :, 1]), zeta1, mode="same", boundary="symm")
+
+ # G at B locations and G at R locations
+ if self.bayer_pattern == "rggb":
+ # G at B locations
+ data[1::2, 1::2, 1] = -beta + np.multiply(data_beta[1::2, 1::2, 2], g_over_b[1::2, 1::2])
+ # G at R locations
+ data[::2, ::2, 1] = -beta + np.multiply(data_beta[::2, ::2, 0], g_over_r[::2, ::2])
+ # B at R locations
+ data[::2, ::2, 2] = -beta + np.multiply(data_beta[::2, ::2, 1], b_over_g_zeta2[::2, ::2])
+ # R at B locations
+ data[1::2, 1::2, 0] = -beta + np.multiply(data_beta[1::2, 1::2, 1], r_over_g_zeta2[1::2, 1::2])
+ # B at G locations
+ data[::2, 1::2, 2] = -beta + np.multiply(data_beta[::2, 1::2, 1], b_over_g_zeta1[::2, 1::2])
+ data[1::2, ::2, 2] = -beta + np.multiply(data_beta[1::2, ::2, 1], b_over_g_zeta1[1::2, ::2])
+ # R at G locations
+ data[::2, 1::2, 0] = -beta + np.multiply(data_beta[::2, 1::2, 1], r_over_g_zeta1[::2, 1::2])
+ data[1::2, ::2, 0] = -beta + np.multiply(data_beta[1::2, ::2, 1], r_over_g_zeta1[1::2, ::2])
+
+ elif self.bayer_pattern == "grbg":
+ # G at B locations
+ data[1::2, ::2, 1] = -beta + np.multiply(data_beta[1::2, ::2, 2], g_over_b[1::2, ::2])
+ # G at R locations
+ data[::2, 1::2, 1] = -beta + np.multiply(data_beta[::2, 1::2, 0], g_over_r[::2, 1::2])
+ # B at R locations
+ data[::2, 1::2, 2] = -beta + np.multiply(data_beta[::2, 1::2, 1], b_over_g_zeta2[::2, 1::2])
+ # R at B locations
+ data[1::2, ::2, 0] = -beta + np.multiply(data_beta[1::2, ::2, 1], r_over_g_zeta2[1::2, ::2])
+ # B at G locations
+ data[::2, ::2, 2] = -beta + np.multiply(data_beta[::2, ::2, 1], b_over_g_zeta1[::2, ::2])
+ data[1::2, 1::2, 2] = -beta + np.multiply(data_beta[1::2, 1::2, 1], b_over_g_zeta1[1::2, 1::2])
+ # R at G locations
+ data[::2, ::2, 0] = -beta + np.multiply(data_beta[::2, ::2, 1], r_over_g_zeta1[::2, ::2])
+ data[1::2, 1::2, 0] = -beta + np.multiply(data_beta[1::2, 1::2, 1], r_over_g_zeta1[1::2, 1::2])
+
+ elif self.bayer_pattern == "gbrg":
+ # G at B locations
+ data[::2, 1::2, 1] = -beta + np.multiply(data_beta[::2, 1::2, 2], g_over_b[::2, 1::2])
+ # G at R locations
+ data[1::2, ::2, 1] = -beta + np.multiply(data_beta[1::2, ::2, 0], g_over_r[1::2, ::2])
+ # B at R locations
+ data[1::2, ::2, 2] = -beta + np.multiply(data_beta[1::2, ::2, 1], b_over_g_zeta2[1::2, ::2])
+ # R at B locations
+ data[::2, 1::2, 0] = -beta + np.multiply(data_beta[::2, 1::2, 1], r_over_g_zeta2[::2, 1::2])
+ # B at G locations
+ data[::2, ::2, 2] = -beta + np.multiply(data_beta[::2, ::2, 1], b_over_g_zeta1[::2, ::2])
+ data[1::2, 1::2, 2] = -beta + np.multiply(data_beta[1::2, 1::2, 1], b_over_g_zeta1[1::2, 1::2])
+ # R at G locations
+ data[::2, ::2, 0] = -beta + np.multiply(data_beta[::2, ::2, 1], r_over_g_zeta1[::2, ::2])
+ data[1::2, 1::2, 0] = -beta + np.multiply(data_beta[1::2, 1::2, 1], r_over_g_zeta1[1::2, 1::2])
+
+ elif self.bayer_pattern == "bggr":
+ # G at B locations
+ data[::2, ::2, 1] = -beta + np.multiply(data_beta[::2, ::2, 2], g_over_b[::2, ::2])
+ # G at R locations
+ data[1::2, 1::2, 1] = -beta + np.multiply(data_beta[1::2, 1::2, 0], g_over_r[1::2, 1::2])
+ # B at R locations
+ data[1::2, 1::2, 2] = -beta + np.multiply(data_beta[1::2, 1::2, 1], b_over_g_zeta2[1::2, 1::2])
+ # R at B locations
+ data[::2, ::2, 0] = -beta + np.multiply(data_beta[::2, ::2, 1], r_over_g_zeta2[::2, ::2])
+ # B at G locations
+ data[::2, 1::2, 2] = -beta + np.multiply(data_beta[::2, 1::2, 1], b_over_g_zeta1[::2, 1::2])
+ data[1::2, ::2, 2] = -beta + np.multiply(data_beta[1::2, ::2, 1], b_over_g_zeta1[1::2, ::2])
+ # R at G locations
+ data[::2, 1::2, 0] = -beta + np.multiply(data_beta[::2, 1::2, 1], r_over_g_zeta1[::2, 1::2])
+ data[1::2, ::2, 0] = -beta + np.multiply(data_beta[1::2, ::2, 1], r_over_g_zeta1[1::2, ::2])
+
+
+ return np.clip(data, self.clip_range[0], self.clip_range[1])
+
+
+ def directionally_weighted_gradient_based_interpolation(self):
+ # Reference:
+ # http://www.arl.army.mil/arlreports/2010/ARL-TR-5061.pdf
+
+ print("----------------------------------------------------")
+ print("Running demosaicing using directionally weighted gradient based interpolation...")
+
+ # Fill up the green channel
+ G = debayer.fill_channel_directional_weight(self.data, self.bayer_pattern)
+
+ B, R = debayer.fill_br_locations(self.data, G, self.bayer_pattern)
+
+ width, height = utility.helpers(self.data).get_width_height()
+ output = np.empty((height, width, 3), dtype=np.float32)
+ output[:, :, 0] = R
+ output[:, :, 1] = G
+ output[:, :, 2] = B
+
+ return np.clip(output, self.clip_range[0], self.clip_range[1])
+
+
+ def post_process_median_filter(self, edge_detect_kernel_size=3, edge_threshold=0, median_filter_kernel_size=3, clip_range=[0, 65535]):
+ # Objective is to reduce the zipper effect around the edges
+ # Inputs:
+ # edge_detect_kernel_size: the neighborhood size used to detect edges
+ # edge_threshold: the threshold value above which (compared against)
+ # the gradient_magnitude to declare if it is an edge
+ # median_filter_kernel_size: the neighborhood size used to perform
+ # median filter operation
+ # clip_range: used for scaling in edge_detection
+ #
+ # Output:
+ # output: median filtered output around the edges
+ # edge_location: a debug image to see where the edges were detected
+ # based on the threshold
+
+
+ # detect edge locations
+ edge_location = utility.edge_detection(self.data).sobel(edge_detect_kernel_size, "is_edge", edge_threshold, clip_range)
+
+ # allocate space for output
+ output = np.empty(np.shape(self.data), dtype=np.float32)
+
+ if (np.ndim(self.data) > 2):
+
+ for i in range(0, np.shape(self.data)[2]):
+ output[:, :, i] = utility.helpers(self.data[:, :, i]).edge_wise_median(median_filter_kernel_size, edge_location[:, :, i])
+
+ elif (np.ndim(self.data) == 2):
+ output = utility.helpers(self.data).edge_wise_median(median_filter_kernel_size, edge_location)
+
+ return output, edge_location
+
+ def __str__(self):
+ return self.name
+
+
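Typical use of the class chains the interpolation with one of the post-processing passes; given a `bayer_frame` array, an illustrative sketch:

```python
rgb = demosaic(bayer_frame, bayer_pattern="rggb", clip_range=[0, 65535]).mhc()
rgb = demosaic(rgb, "rggb").post_process_local_color_ratio(beta=64.)  # suppress chroma jumps
```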
+ # =============================================================
+ # class: lens_shading_correction
+ # Correct the lens shading / vignetting
+ # =============================================================
+ class lens_shading_correction:
+ def __init__(self, data, name="lens_shading_correction"):
+ # convert to float32 in case it was not
+ self.data = np.float32(data)
+ self.name = name
+
+ def flat_field_compensation(self, dark_current_image, flat_field_image):
+ # dark_current_image:
+ # is captured from the camera with cap on
+ # and fully dark condition, several images captured and
+ # temporally averaged
+ # flat_field_image:
+ # is found by capturing an image of a flat field test chart
+ # with certain lighting condition
+ # Note: flat_field_compensation is a memory intensive procedure because
+ # both the dark_current_image and flat_field_image need to be
+ # saved in memory beforehand
+ print("----------------------------------------------------")
+ print("Running lens shading correction with flat field compensation...")
+
+ # convert to float32 in case it was not
+ dark_current_image = np.float32(dark_current_image)
+ flat_field_image = np.float32(flat_field_image)
+ temp = flat_field_image - dark_current_image
+ return np.average(temp) * np.divide((self.data - dark_current_image), temp)
+
+ def approximate_mathematical_compensation(self, params, clip_min=0, clip_max=65535):
+ # params:
+ # parameters of a parabolic model y = a*(x-b)^2 + c
+ # For example, params = [0.01759, -28.37, -13.36]
+ # Note: approximate_mathematical_compensation requires less memory
+ print("----------------------------------------------------")
+ print("Running lens shading correction with approximate mathematical compensation...")
+ width, height = utility.helpers(self.data).get_width_height()
+
+ center_pixel_pos = [height/2, width/2]
+ max_distance = utility.distance_euclid(center_pixel_pos, [height, width])
+
+ # allocate memory for output
+ temp = np.empty((height, width), dtype=np.float32)
+
+ for i in range(0, height):
+ for j in range(0, width):
+ distance = utility.distance_euclid(center_pixel_pos, [i, j]) / max_distance
+ # parabolic model
+ gain = params[0] * (distance - params[1])**2 + params[2]
+ temp[i, j] = self.data[i, j] * gain
+
+ temp = np.clip(temp, clip_min, clip_max)
+ return temp
+
+ def __str__(self):
+ return "lens shading correction. There are two methods: " + \
+ "\n (1) flat_field_compensation: requires dark_current_image and flat_field_image" + \
+ "\n (2) approximate_mathematical_compensation:"
+
+
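The double loop evaluates the parabolic gain y = a*(d - b)^2 + c once per pixel; the same gain map can be built in one shot from a coordinate grid. A sketch under the same parameter convention:

```python
def radial_gain_map(height, width, params):
    yy, xx = np.mgrid[0:height, 0:width]
    cy, cx = height / 2., width / 2.
    # normalized Euclidean distance from the image center (corner distance = 1)
    dist = np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2) / np.sqrt(cy ** 2 + cx ** 2)
    return params[0] * (dist - params[1]) ** 2 + params[2]
```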
+ # =============================================================
+ # class: bayer_denoising
+ # Reduce noise in the Bayer raw domain
+ # =============================================================
+ class bayer_denoising:
+ def __init__(self, data, name="bayer_denoising"):
+ # convert to float32 in case it was not
+ self.data = np.float32(data)
+ self.name = name
+
+ def utilize_hvs_behavior(self, bayer_pattern, initial_noise_level, hvs_min, hvs_max, threshold_red_blue, clip_range):
+ # Objective: bayer denoising
+ # Inputs:
+ # bayer_pattern: rggb, gbrg, grbg, bggr
+ # initial_noise_level:
+ # Output:
+ # denoised bayer raw output
+ # Source: Based on paper titled "Noise Reduction for CFA Image Sensors
+ # Exploiting HVS Behaviour," by Angelo Bosco, Sebastiano Battiato,
+ # Arcangelo Bruna and Rosetta Rizzo
+ # Sensors 2009, 9, 1692-1713; doi:10.3390/s90301692
+
+ print("----------------------------------------------------")
+ print("Running bayer denoising utilizing hvs behavior...")
+
+ # work on raw instead of self.data so that
+ # self.data is not modified
+ raw = self.data
+ raw = np.clip(raw, clip_range[0], clip_range[1])
+ width, height = utility.helpers(raw).get_width_height()
+
+ # First make the bayer_pattern rggb
+ # The algorithm is written only for rggb pattern, thus convert all other
+ # patterns to rggb. Furthermore, this shuffling does not affect the
+ # algorithm output
+ if (bayer_pattern != "rggb"):
+ raw = utility.helpers(self.data).shuffle_bayer_pattern(bayer_pattern, "rggb")
+
+ # fixed neighborhood_size
+ neighborhood_size = 5 # we are keeping this fixed
+ # bigger size such as 9 can be declared
+ # however, the code needs to be changed then
+
+ # pad two pixels at the border
+ no_of_pixel_pad = math.floor(neighborhood_size / 2) # number of pixels to pad
+
+ raw = np.pad(raw, \
+ (no_of_pixel_pad, no_of_pixel_pad),\
+ 'reflect') # reflect would not repeat the border value
+
+ # allocating space for denoised output
+ denoised_out = np.empty((height, width), dtype=np.float32)
+
+ texture_degree_debug = np.empty((height, width), dtype=np.float32)
+ for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
+ for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
+
+ # center pixel
+ center_pixel = raw[i, j]
+
+ # signal analyzer block
+ half_max = clip_range[1] / 2
+ if (center_pixel <= half_max):
+ hvs_weight = -(((hvs_max - hvs_min) * center_pixel) / half_max) + hvs_max
+ else:
+ hvs_weight = (((center_pixel - clip_range[1]) * (hvs_max - hvs_min))/(clip_range[1] - half_max)) + hvs_max
+
+ # noise level estimator previous value
+ if (j < no_of_pixel_pad+2):
+ noise_level_previous_red = initial_noise_level
+ noise_level_previous_blue = initial_noise_level
+ noise_level_previous_green = initial_noise_level
+ else:
+ noise_level_previous_green = noise_level_current_green
+ if ((i % 2) == 0): # red
+ noise_level_previous_red = noise_level_current_red
+ elif ((i % 2) != 0): # blue
+ noise_level_previous_blue = noise_level_current_blue
+
+ # Processing depending on Green or Red/Blue
+ # Red
+ if (((i % 2) == 0) and ((j % 2) == 0)):
+ # get neighborhood
+ neighborhood = [raw[i-2, j-2], raw[i-2, j], raw[i-2, j+2],\
+ raw[i, j-2], raw[i, j+2],\
+ raw[i+2, j-2], raw[i+2, j], raw[i+2, j+2]]
+
+ # absolute difference from the center pixel
+ d = np.abs(neighborhood - center_pixel)
+
+ # maximum and minimum difference
+ d_max = np.max(d)
+ d_min = np.min(d)
+
+ # calculate texture_threshold
+ texture_threshold = hvs_weight + noise_level_previous_red
+
+ # texture degree analyzer
+ if (d_max <= threshold_red_blue):
+ texture_degree = 1.
+ elif ((d_max > threshold_red_blue) and (d_max <= texture_threshold)):
+ texture_degree = -((d_max - threshold_red_blue) / (texture_threshold - threshold_red_blue)) + 1.
+ elif (d_max > texture_threshold):
+ texture_degree = 0.
+
+ # noise level estimator update
+ noise_level_current_red = texture_degree * d_max + (1 - texture_degree) * noise_level_previous_red
+
+ # Blue
+ elif (((i % 2) != 0) and ((j % 2) != 0)):
+
+ # get neighborhood
+ neighborhood = [raw[i-2, j-2], raw[i-2, j], raw[i-2, j+2],\
+ raw[i, j-2], raw[i, j+2],\
+ raw[i+2, j-2], raw[i+2, j], raw[i+2, j+2]]
+
+ # absolute difference from the center pixel
+ d = np.abs(neighborhood - center_pixel)
+
+ # maximum and minimum difference
+ d_max = np.max(d)
+ d_min = np.min(d)
+
+ # calculate texture_threshold
+ texture_threshold = hvs_weight + noise_level_previous_blue
+
+ # texture degree analyzer
+ if (d_max <= threshold_red_blue):
+ texture_degree = 1.
+ elif ((d_max > threshold_red_blue) and (d_max <= texture_threshold)):
+ texture_degree = -((d_max - threshold_red_blue) / (texture_threshold - threshold_red_blue)) + 1.
+ elif (d_max > texture_threshold):
+ texture_degree = 0.
+
+ # noise level estimator update
+ noise_level_current_blue = texture_degree * d_max + (1 - texture_degree) * noise_level_previous_blue
+
+ # Green
+ elif ((((i % 2) == 0) and ((j % 2) != 0)) or (((i % 2) != 0) and ((j % 2) == 0))):
+
+ neighborhood = [raw[i-2, j-2], raw[i-2, j], raw[i-2, j+2],\
+ raw[i-1, j-1], raw[i-1, j+1],\
+ raw[i, j-2], raw[i, j+2],\
+ raw[i+1, j-1], raw[i+1, j+1],\
+ raw[i+2, j-2], raw[i+2, j], raw[i+2, j+2]]
+
+ # difference from the center pixel
+ d = np.abs(neighborhood - center_pixel)
+
+ # maximum and minimum difference
+ d_max = np.max(d)
+ d_min = np.min(d)
+
+ # calculate texture_threshold
+ texture_threshold = hvs_weight + noise_level_previous_green
+
+ # texture degree analyzer
+ if (d_max == 0):
+ texture_degree = 1
+ elif ((d_max > 0) and (d_max <= texture_threshold)):
+ texture_degree = -(d_max / texture_threshold) + 1.
+ elif (d_max > texture_threshold):
+ texture_degree = 0
+
+ # noise level estimator update
+ noise_level_current_green = texture_degree * d_max + (1 - texture_degree) * noise_level_previous_green
+
+ # similarity threshold calculation
+ if (texture_degree == 1):
+ threshold_low = threshold_high = d_max
+ elif (texture_degree == 0):
+ threshold_low = d_min
+ threshold_high = (d_max + d_min) / 2
+ elif ((texture_degree > 0) and (texture_degree < 1)):
+ threshold_high = (d_max + ((d_max + d_min) / 2)) / 2
+ threshold_low = (d_min + threshold_high) / 2
+
+ # weight computation
+ weight = np.empty(np.size(d), dtype=np.float32)
+ pf = 0.
+ for w_i in range(0, np.size(d)):
+ if (d[w_i] <= threshold_low):
+ weight[w_i] = 1.
+ elif (d[w_i] > threshold_high):
+ weight[w_i] = 0.
+ elif ((d[w_i] > threshold_low) and (d[w_i] < threshold_high)):
+ weight[w_i] = 1. + ((d[w_i] - threshold_low) / (threshold_low - threshold_high))
+
+ pf += weight[w_i] * neighborhood[w_i] + (1. - weight[w_i]) * center_pixel
+
+ denoised_out[i - no_of_pixel_pad, j-no_of_pixel_pad] = pf / np.size(d)
+ # texture_degree_debug is a debug output
+ texture_degree_debug[i - no_of_pixel_pad, j-no_of_pixel_pad] = texture_degree
+
+ if (bayer_pattern != "rggb"):
+ denoised_out = utility.shuffle_bayer_pattern(denoised_out, "rggb", bayer_pattern)
+
+ return np.clip(denoised_out, clip_range[0], clip_range[1]), texture_degree_debug
+
+ def __str__(self):
+ return self.name
+
+
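An illustrative invocation; `raw_frame` is a hypothetical Bayer array, and the thresholds are arbitrary values that would normally be tuned to the sensor's noise profile in the 16-bit working range:

```python
denoised, texture_map = bayer_denoising(raw_frame).utilize_hvs_behavior(
    bayer_pattern="rggb", initial_noise_level=2500,
    hvs_min=1000, hvs_max=2000, threshold_red_blue=1300, clip_range=[0, 65535])
```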
+ # =============================================================
+ # class: color_correction
+ # Correct the color in linear domain
+ # =============================================================
+ class color_correction:
+ def __init__(self, data, color_matrix, color_space="srgb", illuminant="d65", name="color correction", clip_range=[0, 65535]):
+ # Inputs:
+ # data: linear rgb image before nonlinearity/gamma
+ # xyz2cam: 3x3 matrix found from the camera metadata, specifically
+ # color matrix 2 from the metadata
+ # color_space: output color space
+ # illuminant: the illuminant of the lighting condition
+ # name: name of the class
+ self.data = np.float32(data)
+ self.xyz2cam = np.float32(color_matrix)
+ self.color_space = color_space
+ self.illuminant = illuminant
+ self.name = name
+ self.clip_range = clip_range
+
+ def get_rgb2xyz(self):
+ # Objective: get the rgb2xyz matrix depending on the output color space
+ # and the illuminant
+ # Source: http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
+ if (self.color_space == "srgb"):
+ if (self.illuminant == "d65"):
+ return [[.4124564, .3575761, .1804375],\
+ [.2126729, .7151522, .0721750],\
+ [.0193339, .1191920, .9503041]]
+ elif (self.illuminant == "d50"):
+ return [[.4360747, .3850649, .1430804],\
+ [.2225045, .7168786, .0606169],\
+ [.0139322, .0971045, .7141733]]
+ else:
+ print("for now, illuminant must be d65 or d50")
+ return
+
+ elif (self.color_space == "adobe-rgb-1998"):
+ if (self.illuminant == "d65"):
+ return [[.5767309, .1855540, .1881852],\
+ [.2973769, .6273491, .0752741],\
+ [.0270343, .0706872, .9911085]]
+ elif (self.illuminant == "d50"):
+ return [[.6097559, .2052401, .1492240],\
+ [.3111242, .6256560, .0632197],\
+ [.0194811, .0608902, .7448387]]
+ else:
+ print("for now, illuminant must be d65 or d50")
+ return
+ else:
+ print("for now, color_space must be srgb or adobe-rgb-1998")
+ return
+
+ def calculate_cam2rgb(self):
+ # Objective: Calculates the color correction matrix
+
+ # matrix multiplication
+ rgb2cam = np.dot(self.xyz2cam, self.get_rgb2xyz())
+
+ # make sum of each row to be 1.0, necessary to preserve white balance
+ # basically divide each value by its row wise sum
+ rgb2cam = np.divide(rgb2cam, np.reshape(np.sum(rgb2cam, 1), [3, 1]))
+
+ # - invert the matrix to get cam2rgb.
+ # - cam2rgb should also have the characteristic that sum of each row
+ # equal to 1.0 to preserve white balance
+ # - check if rgb2cam is invertible by checking the condition of
+ # rgb2cam. If rgb2cam is singular it will give a warning and
+ # return an identity matrix
+ if (np.linalg.cond(rgb2cam) < (1 / sys.float_info.epsilon)):
+ return np.linalg.inv(rgb2cam) # this is cam2rgb / color correction matrix
+ else:
+ print("Warning! matrix not invertible.")
+ return np.identity(3, dtype=np.float32)
+
+ def apply_cmatrix(self):
+ # Objective: Apply the color correction matrix (cam2rgb)
+
+ print("----------------------------------------------------")
+ print("running color correction...")
+
+ # check if data is 3 dimensional
+ if (np.ndim(self.data) != 3):
+ print("data needs to be three dimensional")
+ return
+
+ # get the color correction matrix
+ cam2rgb = self.calculate_cam2rgb()
+
+ # get width and height
+ width, height = utility.helpers(self.data).get_width_height()
+
+ # apply the matrix
+ R = self.data[:, :, 0]
+ G = self.data[:, :, 1]
+ B = self.data[:, :, 2]
+
+ color_corrected = np.empty((height, width, 3), dtype=np.float32)
+ color_corrected[:, :, 0] = R * cam2rgb[0, 0] + G * cam2rgb[0, 1] + B * cam2rgb[0, 2]
+ color_corrected[:, :, 1] = R * cam2rgb[1, 0] + G * cam2rgb[1, 1] + B * cam2rgb[1, 2]
+ color_corrected[:, :, 2] = R * cam2rgb[2, 0] + G * cam2rgb[2, 1] + B * cam2rgb[2, 2]
+
+ return np.clip(color_corrected, self.clip_range[0], self.clip_range[1])
+
+ def __str__(self):
+ return self.name
+
+
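The row normalization is what preserves white balance: each row of `rgb2cam` sums to 1, so a neutral input (R = G = B) passes through unchanged, and the inverse inherits the same property. A small numeric sketch (the `xyz2cam` values here are made up):

```python
xyz2cam = np.array([[ 1.06, -0.33, -0.07],
                    [-0.44,  1.22,  0.22],
                    [-0.05,  0.21,  0.55]], dtype=np.float32)  # hypothetical camera metadata
cc = color_correction(np.zeros((2, 2, 3), dtype=np.float32), xyz2cam)
cam2rgb = cc.calculate_cam2rgb()
print(cam2rgb.sum(axis=1))  # each row of the inverse also sums to ~1
```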
+ # =============================================================
+ # class: nonlinearity
+ # apply gamma or degamma
+ # =============================================================
+ class nonlinearity:
+ def __init__(self, data, name="nonlinearity"):
+ self.data = np.float32(data)
+ self.name = name
+
+ def luma_adjustment(self, multiplier, clip_range=[0, 65535]):
+ # The multiplier is applied only on the luma channel,
+ # in log10 scale:
+ # multiplier 10 means multiplied by 1.,
+ # multiplier 100 means multiplied by 2., and so on
+
+ print("----------------------------------------------------")
+ print("Running brightening...")
+
+ return np.clip(np.log10(multiplier) * self.data, clip_range[0], clip_range[1])
+
+ def by_value(self, value, clip_range):
+
+ print("----------------------------------------------------")
+ print("Running nonlinearity by value...")
+
+ # clip within the range
+ data = np.clip(self.data, clip_range[0], clip_range[1])
+ # make 0 to 1
+ data = data / clip_range[1]
+ # apply nonlinearity
+ return np.clip(clip_range[1] * (data**value), clip_range[0], clip_range[1])
+
+ def by_table(self, table, nonlinearity_type="gamma", clip_range=[0, 65535]):
+
+ print("----------------------------------------------------")
+ print("Running nonlinearity by table...")
+
+ gamma_table = np.loadtxt(table)
+ gamma_table = clip_range[1] * gamma_table / np.max(gamma_table)
+ linear_table = np.linspace(clip_range[0], clip_range[1], np.size(gamma_table))
+
+ # linear interpolation, query is the self.data
+ if (nonlinearity_type == "gamma"):
+ # mapping is from linear_table to gamma_table
+ return np.clip(np.interp(self.data, linear_table, gamma_table), clip_range[0], clip_range[1])
+ elif (nonlinearity_type == "degamma"):
+ # mapping is from gamma_table to linear_table
+ return np.clip(np.interp(self.data, gamma_table, linear_table), clip_range[0], clip_range[1])
+
+ def by_equation(self, a, b, clip_range):
+
+ print("----------------------------------------------------")
+ print("Running nonlinearity by equation...")
+
+ # clip within the range
+ data = np.clip(self.data, clip_range[0], clip_range[1])
+ # make 0 to 1
+ data = data / clip_range[1]
+
+ # apply nonlinearity
+ return np.clip(clip_range[1] * (a * np.exp(b * data) + data + a * data - a * np.exp(b) * data - a), clip_range[0], clip_range[1])
+
+ def __str__(self):
+ return self.name
+
+
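For a standard display gamma, the `by_value` path is the simplest: normalize to [0, 1], raise to the power 1/2.2, and rescale. For example, mid-gray maps to 0.5**(1/2.2) * 65535, roughly 47827:

```python
rgb = np.full((2, 2, 3), 32767.5, dtype=np.float32)  # mid-gray in the 16-bit range
gamma_applied = nonlinearity(rgb).by_value(1 / 2.2, [0, 65535])
```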
+ # =============================================================
+ # class: tone_mapping
+ # improve the overall tone of the image
+ # =============================================================
+ class tone_mapping:
+ def __init__(self, data, name="tone mapping"):
+ self.data = np.float32(data)
+ self.name = name
+
+ def nonlinear_masking(self, strength_multiplier=1.0, gaussian_kernel_size=[5, 5], gaussian_sigma=1.0, clip_range=[0, 65535]):
+ # Objective: improves the overall tone of the image
+ # Inputs:
+ # strength_multiplier: >0. The higher, the more aggressive the tone mapping
+ # gaussian_kernel_size: kernel size for calculating the mask image
+ # gaussian_sigma: spread of the gaussian kernel for calculating the
+ # mask image
+ #
+ # Source:
+ # N. Moroney, “Local color correction using non-linear masking”,
+ # Proc. IS&T/SID 8th Color Imaging Conference, pp. 108-111, (2000)
+ #
+ # Note: slight changes were made by Mushfiqul Alam, specifically
+ # introducing the strength_multiplier
+
+ print("----------------------------------------------------")
+ print("Running tone mapping by non linear masking...")
+
+ # convert to gray image
+ if (np.ndim(self.data) == 3):
+ gray_image = utility.color_conversion(self.data).rgb2gray()
+ else:
+ gray_image = self.data
+
+ # gaussian blur the gray image
+ gaussian_kernel = utility.create_filter().gaussian(gaussian_kernel_size, gaussian_sigma)
+
+ # the mask image: (1) blur
+ # (2) bring within range 0 to 1
+ # (3) multiply with strength_multiplier
+ mask = signal.convolve2d(gray_image, gaussian_kernel, mode="same", boundary="symm")
+ mask = strength_multiplier * mask / clip_range[1]
+
+ # calculate the alpha image
+ temp = np.power(0.5, mask)
+ if (np.ndim(self.data) == 3):
+ width, height = utility.helpers(self.data).get_width_height()
+ alpha = np.empty((height, width, 3), dtype=np.float32)
+ alpha[:, :, 0] = temp
+ alpha[:, :, 1] = temp
+ alpha[:, :, 2] = temp
+ else:
+ alpha = temp
+
+ # output
+ return np.clip(clip_range[1] * np.power(self.data/clip_range[1], alpha), clip_range[0], clip_range[1])
+
+ def dynamic_range_compression(self, drc_type="normal", drc_bound=[-40., 260.], clip_range=[0, 65535]):
+
+ ycc = utility.color_conversion(self.data).rgb2ycc("bt601")
+ y = ycc[:, :, 0]
+ cb = ycc[:, :, 1]
+ cr = ycc[:, :, 2]
+
+ if (drc_type == "normal"):
+ edge = y
+ elif (drc_type == "joint"):
+ edge = utility.edge_detection(y).sobel(3, "gradient_magnitude")
+
+ y_bilateral_filtered = utility.special_function(y).bilateral_filter(edge)
+ detail = np.divide(ycc[:, :, 0], y_bilateral_filtered)
+
+ C = drc_bound[0] * clip_range[1] / 255.
+ temp = drc_bound[1] * clip_range[1] / 255.
+ F = (temp * (C + clip_range[1])) / (clip_range[1] * (temp - C))
+ y_bilateral_filtered_contrast_reduced = F * (y_bilateral_filtered - (clip_range[1] / 2.)) + (clip_range[1] / 2.)
+
+ y_out = np.multiply(y_bilateral_filtered_contrast_reduced, detail)
+
+ ycc_out = ycc
+ ycc_out[:, :, 0] = y_out
+ rgb_out = utility.color_conversion(ycc_out).ycc2rgb("bt601")
+
+ return np.clip(rgb_out, clip_range[0], clip_range[1])
+
+
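In effect, nonlinear masking raises each pixel to a spatially varying exponent, out = clip_max * (in / clip_max) ** (0.5 ** mask), so a positive mask (alpha < 1) lifts dark regions while a zero mask leaves the pixel untouched:

```python
clip_max = 65535.
image = np.float32([[16000., 48000.]])
mask = np.float32([[1.0, 0.0]])                        # from the blurred, scaled gray image
out = clip_max * (image / clip_max) ** (0.5 ** mask)   # alpha = 0.5**mask per pixel
```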
+ # =============================================================
965
+ # class: sharpening
966
+ # sharpens the image
967
+ # =============================================================
968
+ class sharpening:
969
+ def __init__(self, data, name="sharpening"):
970
+ self.data = np.float32(data)
971
+ self.name = name
972
+
973
+ def unsharp_masking(self, gaussian_kernel_size=[5, 5], gaussian_sigma=2.0,\
974
+ slope=1.5, tau_threshold=0.05, gamma_speed=4., clip_range=[0, 65535]):
975
+ # Objective: sharpen image
976
+ # Input:
977
+ # gaussian_kernel_size: dimension of the gaussian blur filter kernel
978
+ #
979
+ # gaussian_sigma: spread of the gaussian blur filter kernel
980
+ # bigger sigma more sharpening
981
+ #
982
+ # slope: controls the boost.
983
+ # the amount of sharpening, higher slope
984
+ # means more aggresssive sharpening
985
+ #
986
+ # tau_threshold: controls the amount of coring.
987
+ # threshold value till which the image is
988
+ # not sharpened. The lower the value of
989
+ # tau_threshold the more frequencies
990
+ # goes through the sharpening process
991
+ #
992
+ # gamma_speed: controls the speed of convergence to the slope
993
+ # smaller value gives a little bit more
994
+ # sharpened image, this may be a fine tuner
995
+
996
+ print("----------------------------------------------------")
997
+ print("Running sharpening by unsharp masking...")
998
+
999
+ # create gaussian kernel
1000
+ gaussian_kernel = utility.create_filter().gaussian(gaussian_kernel_size, gaussian_sigma)
1001
+
1002
+ # convolve the image with the gaussian kernel
1003
+ # first input is the image
1004
+ # second input is the kernel
1005
+ # output shape will be the same as the first input
1006
+ # boundary will be padded by using symmetrical method while convolving
1007
+ if np.ndim(self.data) > 2:
1008
+ image_blur = np.empty(np.shape(self.data), dtype=np.float32)
1009
+ for i in range(0, np.shape(self.data)[2]):
1010
+ image_blur[:, :, i] = signal.convolve2d(self.data[:, :, i], gaussian_kernel, mode="same", boundary="symm")
1011
+ else:
1012
+ image_blur = signal.convolve2d(self.data, gaussian_kernel, mode="same", boundary="symm")
1013
+
1014
+ # the high frequency component image
1015
+ image_high_pass = self.data - image_blur
1016
+
1017
+ # soft coring (see in utility)
1018
+ # basically pass the high pass image via a slightly nonlinear function
1019
+ tau_threshold = tau_threshold * clip_range[1]
1020
+
1021
+ # add the soft cored high pass image to the original and clip
1022
+ # within range and return
1023
+ return np.clip(self.data + utility.special_function(\
1024
+ image_high_pass).soft_coring(\
1025
+ slope, tau_threshold, gamma_speed), clip_range[0], clip_range[1])
1026
+
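A hedged usage sketch for the method above; the input array and every parameter value are illustrative assumptions, not recommended settings.

import numpy as np

img = np.random.uniform(0, 65535, (128, 128, 3)).astype(np.float32)  # hypothetical 16-bit RGB
sharp = sharpening(img).unsharp_masking(
    gaussian_kernel_size=[5, 5], gaussian_sigma=2.0,
    slope=1.5, tau_threshold=0.05, gamma_speed=4.)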
1027
+ def __str__(self):
1028
+ return self.name
1029
+
1030
+
1031
+ # =============================================================
1032
+ # class: noise_reduction
1033
+ # reduce noise of the nonlinear image (after gamma)
1034
+ # =============================================================
1035
+ class noise_reduction:
1036
+ def __init__(self, data, clip_range=[0, 65535], name="noise reduction"):
1037
+ self.data = np.float32(data)
1038
+ self.clip_range = clip_range
1039
+ self.name = name
1040
+
1041
+ def sigma_filter(self, neighborhood_size=7, sigma=[6, 6, 6]):
1042
+
1043
+ print("----------------------------------------------------")
1044
+ print("Running noise reduction by sigma filter...")
1045
+
1046
+ if np.ndim(self.data) > 2: # if rgb image
1047
+ output = np.empty(np.shape(self.data), dtype=np.float32)
1048
+ for i in range(0, np.shape(self.data)[2]):
1049
+ output[:, :, i] = utility.helpers(self.data[:, :, i]).sigma_filter_helper(neighborhood_size, sigma[i])
1050
+ return np.clip(output, self.clip_range[0], self.clip_range[1])
1051
+ else: # gray image
1052
+ return np.clip(utility.helpers(self.data).sigma_filter_helper(neighborhood_size, sigma), self.clip_range[0], self.clip_range[1])
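The actual filtering is delegated to utility.helpers(...).sigma_filter_helper; a minimal sketch of the sigma-filter principle (average only the neighbours within a band around the centre pixel) could look like this, with the 2*sigma band an assumption:

import numpy as np

def sigma_filter_window(window, sigma):
    # keep only neighbours within +/- 2*sigma of the centre value, then average
    centre = window[window.shape[0] // 2, window.shape[1] // 2]
    keep = np.abs(window - centre) <= 2 * sigma
    return window[keep].mean()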
1053
+
1054
+ def __str__(self):
1055
+ return self.name
1056
+
1057
+
1058
+ # =============================================================
1059
+ # class: distortion_correction
1060
+ # correct the distortion
1061
+ # =============================================================
1062
+ class distortion_correction:
1063
+ def __init__(self, data, name="distortion correction"):
1064
+ self.data = np.float32(data)
1065
+ self.name = name
1066
+
1067
+
1068
+ def empirical_correction(self, correction_type="pincushion-1", strength=0.1, zoom_type="crop", clip_range=[0, 65535]):
1069
+ #------------------------------------------------------
1070
+ # Objective:
1071
+ # correct geometric distortion with the assumption that the distortion
1072
+ # is symmetric and the center is at the center of the image
1073
+ # Input:
1074
+ # correction_type: which type of correction needed to be carried
1075
+ # out, choose one of the four:
1076
+ # pincushion-1, pincushion-2, barrel-1, barrel-2
1077
+ # 1 and 2 differ in the power applied
1078
+ # to the radius
1079
+ #
1080
+ # strength: should be equal to or greater than 0.
1081
+ # 0 means no correction will be done.
1082
+ # if a negative value is applied, the correction_type
1083
+ # will be reversed. Thus, a value >= 0 is expected.
1084
+ #
1085
+ # zoom_type: either "fit" or "crop"
1086
+ # fit will return image with full content
1087
+ # in the whole area
1088
+ # crop will return an image with 0 values outside
1089
+ # the border
1090
+ #
1091
+ # clip_range: to clip the final image within the range
1092
+ #------------------------------------------------------
1093
+
1094
+ if (strength < 0):
1095
+ print("Warning! strength should be equal of greater than 0.")
1096
+ return self.data
1097
+
1098
+ print("----------------------------------------------------")
1099
+ print("Running distortion correction by empirical method...")
1100
+
1101
+ # get half_width and half_height, assume this is the center
1102
+ width, height = utility.helpers(self.data).get_width_height()
1103
+ half_width = width / 2
1104
+ half_height = height / 2
1105
+
1106
+ # create a meshgrid of points
1107
+ xi, yi = np.meshgrid(np.linspace(-half_width, half_width, width),\
1108
+ np.linspace(-half_height, half_height, height))
1109
+
1110
+ # cartesian to polar coordinate
1111
+ r = np.sqrt(xi**2 + yi**2)
1112
+ theta = np.arctan2(yi, xi)
1113
+
1114
+ # maximum radius
1115
+ R = math.sqrt(width**2 + height**2)
1116
+
1117
+ # make r within range 0~1
1118
+ r = r / R
1119
+
1120
+ # apply the radius to the desired transformation
1121
+ s = utility.special_function(r).distortion_function(correction_type, strength)
1122
+
1123
+ # select a scaling_parameter based on zoom_type and strength value
1124
+ if ((correction_type=="barrel-1") or (correction_type=="barrel-2")):
1125
+ if (zoom_type == "fit"):
1126
+ scaling_parameter = r[0, 0] / s[0, 0]
1127
+ elif (zoom_type == "crop"):
1128
+ scaling_parameter = 1. / (1. + strength * (np.min([half_width, half_height])/R)**2)
1129
+ elif ((correction_type=="pincushion-1") or (correction_type=="pincushion-2")):
1130
+ if (zoom_type == "fit"):
1131
+ scaling_parameter = 1. / (1. + strength * (np.min([half_width, half_height])/R)**2)
1132
+ elif (zoom_type == "crop"):
1133
+ scaling_parameter = r[0, 0] / s[0, 0]
1134
+
1135
+ # multiply by scaling_parameter and un-normalize
1136
+ s = s * scaling_parameter * R
1137
+
1138
+ # convert back to cartesian coordinate and add back the center coordinate
1139
+ xt = np.multiply(s, np.cos(theta))
1140
+ yt = np.multiply(s, np.sin(theta))
1141
+
1142
+ # interpolation
1143
+ if np.ndim(self.data) == 3:
1144
+
1145
+ output = np.empty(np.shape(self.data), dtype=np.float32)
1146
+
1147
+ output[:, :, 0] = utility.helpers(self.data[:, :, 0]).bilinear_interpolation(xt + half_width, yt + half_height)
1148
+ output[:, :, 1] = utility.helpers(self.data[:, :, 1]).bilinear_interpolation(xt + half_width, yt + half_height)
1149
+ output[:, :, 2] = utility.helpers(self.data[:, :, 2]).bilinear_interpolation(xt + half_width, yt + half_height)
1150
+
1151
+ elif np.ndim(self.data) == 2:
1152
+
1153
+ output = utility.helpers(self.data).bilinear_interpolation(xt + half_width, yt + half_height)
1154
+
1155
+ return np.clip(output, clip_range[0], clip_range[1])
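The exact radial law lives in utility.special_function(...).distortion_function; a plausible sketch of the two families (sign conventions here are assumptions, not taken from that helper):

import numpy as np

r = np.linspace(0., 1., 5)                 # normalized radius, as computed above
strength = 0.1
s_barrel = r * (1. + strength * r**2)      # barrel: push points outward
s_pincushion = r * (1. - strength * r**2)  # pincushion: pull points inward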
1156
+
1157
+
1158
+ def __str__(self):
1159
+ return self.name
1160
+
1161
+
1162
+ # =============================================================
1163
+ # class: memory_color_enhancement
1164
+ # enhance memory colors such as sky, grass, skin color
1165
+ # =============================================================
1166
+ class memory_color_enhancement:
1167
+ def __init__(self, data, name="memory color enhancement"):
1168
+ self.data = np.float32(data)
1169
+ self.name = name
1170
+
1171
+ def by_hue_squeeze(self, target_hue, hue_preference, hue_sigma, is_both_side, multiplier, chroma_preference, chroma_sigma, color_space="srgb", illuminant="d65", clip_range=[0, 65535], cie_version="1931"):
1172
+
1173
+ # RGB to xyz
1174
+ data = utility.color_conversion(self.data).rgb2xyz(color_space, clip_range)
1175
+ # xyz to lab
1176
+ data = utility.color_conversion(data).xyz2lab(cie_version, illuminant)
1177
+ # lab to lch
1178
+ data = utility.color_conversion(data).lab2lch()
1179
+
1180
+ # hue squeezing
1181
+ # we are traversing through different color preferences
1182
+ width, height = utility.helpers(self.data).get_width_height()
1183
+ hue_correction = np.zeros((height, width), dtype=np.float32)
1184
+ for i in range(0, np.size(target_hue)):
1185
+
1186
+ delta_hue = data[:, :, 2] - hue_preference[i]
1187
+
1188
+ if is_both_side[i]:
1189
+ weight_temp = np.exp( -np.power(data[:, :, 2] - target_hue[i], 2) / (2 * hue_sigma[i]**2)) + \
1190
+ np.exp( -np.power(data[:, :, 2] + target_hue[i], 2) / (2 * hue_sigma[i]**2))
1191
+ else:
1192
+ weight_temp = np.exp( -np.power(data[:, :, 2] - target_hue[i], 2) / (2 * hue_sigma[i]**2))
1193
+
1194
+ weight_hue = multiplier[i] * weight_temp / np.max(weight_temp)
1195
+
1196
+ weight_chroma = np.exp( -np.power(data[:, :, 1] - chroma_preference[i], 2) / (2 * chroma_sigma[i]**2))
1197
+
1198
+ hue_correction = hue_correction + np.multiply(np.multiply(delta_hue, weight_hue), weight_chroma)
1199
+
1200
+ # correct the hue
1201
+ data[:, :, 2] = data[:, :, 2] - hue_correction
1202
+
1203
+ # lch to lab
1204
+ data = utility.color_conversion(data).lch2lab()
1205
+ # lab to xyz
1206
+ data = utility.color_conversion(data).lab2xyz(cie_version, illuminant)
1207
+ # xyz to rgb
1208
+ data = utility.color_conversion(data).xyz2rgb(color_space, clip_range)
1209
+
1210
+ return data
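A small sketch of the per-preference step above: hues near the target get a Gaussian weight and are pulled toward the preferred hue. The numbers are illustrative, not tuned values from this pipeline.

import numpy as np

hue = np.linspace(-180., 180., 361)   # hypothetical hue channel in degrees
target, preference, sigma, mult = 210., 230., 20., 0.6
weight = mult * np.exp(-(hue - target) ** 2 / (2. * sigma ** 2))
hue_corrected = hue - weight * (hue - preference)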
1211
+
1212
+
1213
+ def __str__(self):
1214
+ return self.name
1215
+
1216
+
1217
+ # =============================================================
1218
+ # class: chromatic_aberration_correction
1219
+ # removes artifacts similar to result from chromatic
1220
+ # aberration
1221
+ # =============================================================
1222
+ class chromatic_aberration_correction:
1223
+ def __init__(self, data, name="chromatic aberration correction"):
1224
+ self.data = np.float32(data)
1225
+ self.name = name
1226
+
1227
+ def purple_fringe_removal(self, nsr_threshold, cr_threshold, clip_range=[0, 65535]):
1228
+ # --------------------------------------------------------------
1229
+ # nsr_threshold: near saturated region threshold (in percentage)
1230
+ # cr_threshold: candidate region threshold
1231
+ # --------------------------------------------------------------
1232
+
1233
+ width, height = utility.helpers(self.data).get_width_height()
1234
+
1235
+ r = self.data[:, :, 0]
1236
+ g = self.data[:, :, 1]
1237
+ b = self.data[:, :, 2]
1238
+
1239
+ ## Detection of purple fringe
1240
+ # near saturated region detection
1241
+ nsr_threshold = clip_range[1] * nsr_threshold / 100
1242
+ temp = (r + g + b) / 3
1243
+ temp = np.asarray(temp)
1244
+ mask = temp > nsr_threshold
1245
+ nsr = np.zeros((height, width)).astype(int)
1246
+ nsr[mask] = 1
1247
+
1248
+ # candidate region detection
1249
+ temp = r - b
1250
+ temp1 = b - g
1251
+ temp = np.asarray(temp)
1252
+ temp1 = np.asarray(temp1)
1253
+ mask = (temp < cr_threshold) & (temp1 > cr_threshold)
1254
+ cr = np.zeros((height, width)).astype(int)
1255
+ cr[mask] = 1
1256
+
1257
+ # quantization
1258
+ qr = utility.helpers(r).nonuniform_quantization()
1259
+ qg = utility.helpers(g).nonuniform_quantization()
1260
+ qb = utility.helpers(b).nonuniform_quantization()
1261
+
1262
+ g_qr = utility.edge_detection(qr).sobel(5, "gradient_magnitude")
1263
+ g_qg = utility.edge_detection(qg).sobel(5, "gradient_magnitude")
1264
+ g_qb = utility.edge_detection(qb).sobel(5, "gradient_magnitude")
1265
+
1266
+ g_qr = np.asarray(g_qr)
1267
+ g_qg = np.asarray(g_qg)
1268
+ g_qb = np.asarray(g_qb)
1269
+
1270
+ # bgm: binary gradient magnitude
1271
+ bgm = np.zeros((height, width), dtype=np.float32)
1272
+ mask = (g_qr != 0) | (g_qg != 0) | (g_qb != 0)
1273
+ bgm[mask] = 1
1274
+
1275
+ fringe_map = np.multiply(np.multiply(nsr, cr), bgm)
1276
+ fringe_map = np.asarray(fringe_map)
1277
+ mask = (fringe_map == 1)
1278
+
1279
+ r1 = r
1280
+ g1 = g
1281
+ b1 = b
1282
+ r1[mask] = g1[mask] = b1[mask] = (r[mask] + g[mask] + b[mask]) / 3.
1283
+
1284
+ output = np.empty(np.shape(self.data), dtype=np.float32)
1285
+ output[:, :, 0] = r1
1286
+ output[:, :, 1] = g1
1287
+ output[:, :, 2] = b1
1288
+
1289
+ return np.float32(output)
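A hedged usage sketch; the threshold values are illustrative assumptions (nsr_threshold is a percentage of the clip range, cr_threshold an absolute difference in the same units as the data).

import numpy as np

img = np.random.uniform(0, 65535, (64, 64, 3)).astype(np.float32)
out = chromatic_aberration_correction(img).purple_fringe_removal(
    nsr_threshold=90., cr_threshold=1000.)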
1290
+
1291
+
1292
+ def __str__(self):
1293
+ return self.name
IIR-Lab/ISP_pipeline/lsc_table_r_gr_gb_b_2.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0932e4b3ed5feb111880988eadae26a3cc77a5bd448150087b0983d8a62bb2b7
3
+ size 402653312
IIR-Lab/ISP_pipeline/process_pngs_isp.py ADDED
@@ -0,0 +1,276 @@
1
+ import sys
2
+ sys.path.append('ISP_pipeline')
3
+ from raw_prc_pipeline.pipeline import RawProcessingPipelineDemo
4
+ import cv2
5
+ import numpy as np
6
+ import json
7
+ import PIL.Image as Image
8
+ import os,sys
9
+ from raw_prc_pipeline import io
10
+ from copy import deepcopy
11
+ import torch
12
+
13
+ def resize_using_pil(img, width=1024, height=768):
14
+ img_pil = Image.fromarray(img)
15
+ out_size = (width, height)
16
+ if img_pil.size == out_size:
17
+ return img
18
+ out_img = img_pil.resize(out_size, Image.LANCZOS)
19
+ # out_img = img_pil
20
+ out_img = np.array(out_img)
21
+ return out_img
22
+
23
+ def fix_orientation(image, orientation):
24
+
25
+ if type(orientation) is list:
26
+ orientation = orientation[0]
27
+
28
+ if orientation == 'Horizontal(normal)':
29
+ pass
30
+ elif orientation == "Mirror horizontal":
31
+ image = cv2.flip(image, 0)
32
+ elif orientation == "Rotate 180":
33
+ image = cv2.rotate(image, cv2.ROTATE_180)
34
+ elif orientation == "Mirror vertical":
35
+ image = cv2.flip(image, 1)
36
+ elif orientation == "Mirror horizontal and rotate 270 CW":
37
+ image = cv2.flip(image, 0)
38
+ image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
39
+ elif orientation == "Rotate 90 CW":
40
+ image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
41
+ elif orientation == "Mirror horizontal and rotate 90 CW":
42
+ image = cv2.flip(image, 0)
43
+ image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
44
+ elif orientation == "Rotate 270 CW":
45
+ image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
46
+
47
+ return image
48
+
49
+ def isp_night_imaging(data, meta_data, iso,
50
+ do_demosaic = True, # H/2 W/2
51
+
52
+ do_channel_gain_white_balance = True,
53
+ do_xyz_transform = True,
54
+ do_srgb_transform = True,
55
+
56
+ do_gamma_correct = True, # con
57
+
58
+ do_refinement = True, # 32 bit
59
+ do_to_uint8 = True,
60
+
61
+ do_resize_using_pil = True, # H/8, W/8
62
+ do_fix_orientation = True
63
+ ):
64
+
65
+ pipeline_params = {
66
+ 'tone_mapping': 'Flash', # options: Flash, Storm, Base, Linear, Drago, Mantiuk, Reinhard
67
+ 'illumination_estimation': 'gw', # ie algorithm, options: "gw", "wp", "sog", "iwp"
68
+ 'denoise_flg': True,
69
+ 'out_landscape_width': 1024,
70
+ 'out_landscape_height': 768,
71
+ "color_matrix": [ 1.06835938, -0.29882812, -0.14257812,
72
+ -0.43164062, 1.35546875, 0.05078125,
73
+ -0.1015625, 0.24414062, 0.5859375]
74
+ }
75
+
76
+ pipeline_demo = RawProcessingPipelineDemo(**pipeline_params)
77
+
78
+ # ===================================
79
+ # Demosaicing
80
+ # ===================================
81
+ if do_demosaic:
82
+ data = torch.stack((data[0,:,:], (data[1,:,:]+data[2,:,:])/2, data[3,:,:]), dim=0)
83
+ data = data.permute(1, 2, 0).contiguous()
84
+ # torch.cuda.empty_cache()
85
+ else:
86
+ pass
87
+
88
+ # ===================================
89
+ # Channel gain for white balance
90
+ # ===================================
91
+ if do_channel_gain_white_balance:
92
+ data = pipeline_demo.white_balance(data, img_meta=meta_data)
93
+
94
+ else:
95
+ pass
96
+
97
+ # ===================================
98
+ # xyz_transform
99
+ # ===================================
100
+ if do_xyz_transform:
101
+ data = pipeline_demo.xyz_transform(data,img_meta=meta_data) # CCM
102
+ else:
103
+ pass
104
+
105
+ # ===================================
106
+ # srgb_transform
107
+ # ===================================
108
+ if do_srgb_transform:
109
+ data = pipeline_demo.srgb_transform(data, img_meta=meta_data) # fix ccm
110
+ else:
111
+ pass
112
+
113
+ # ===================================
114
+ # gamma_correct
115
+ # ===================================
116
+ if do_gamma_correct:
117
+ data = pipeline_demo.gamma_correct(data, img_meta=meta_data)
118
+ else:
119
+ pass
120
+
121
+ # ===================================
122
+ # refinement
123
+ # ===================================
124
+ if do_refinement:
125
+ if iso < 1000:
126
+ pth1 = "Rendering_models/low_iso.pth"
127
+ data = pipeline_demo.do_refinement(data, "csrnet", pth1)
128
+ else:
129
+ pth1 = "Rendering_models/high_iso.pth"
130
+ data = pipeline_demo.do_refinement(data, "csrnet", pth1)
131
+ torch.cuda.empty_cache()
132
+
133
+ else:
134
+ pass
135
+
136
+ # ===================================
137
+ # to_uint8
138
+ # ===================================
139
+ if do_to_uint8:
140
+ data = pipeline_demo.to_uint8(data, img_meta=meta_data)
141
+ torch.cuda.empty_cache()
142
+ else:
143
+ pass
144
+
145
+ # ===================================
146
+ # resize_using_pil
147
+ # ===================================
148
+ if do_resize_using_pil:
149
+ data = resize_using_pil(data, pipeline_demo.params["out_landscape_width"], pipeline_demo.params["out_landscape_height"])
150
+
151
+ else:
152
+ pass
153
+
154
+ # ===================================
155
+ # fix_orientation
156
+ # ===================================
157
+ if do_fix_orientation:
158
+ data = fix_orientation(data, meta_data["orientation"])
159
+ else:
160
+ pass
161
+
162
+ return data
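A hedged end-to-end usage sketch for the function above; the file names are placeholders, and the metadata is assumed to carry the keys consumed by the readers defined below.

import torch

meta = json_read("meta.json")                 # hypothetical metadata path
raw = read_rawpng("raw.png", meta)            # packed (4, H/2, W/2) Bayer planes
data = torch.from_numpy(raw).float()
rgb = isp_night_imaging(data, meta, iso=800)  # iso value is an assumption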
163
+
164
+ def readjson(json_path,):
165
+ with open(json_path,'r',encoding='UTF-8') as f:
166
+ result = json.load(f)
167
+
168
+ return result
169
+
170
+ def get_smooth_kernel_size(factor):
171
+ if factor == 1:
172
+ return (5, 5)
173
+ elif factor == 0.5:
174
+ return (3, 3)
175
+ elif factor == 0.375:
176
+ return (3, 3)
177
+ elif factor in [0.2, 0.25]:
178
+ return (5, 5)
179
+ elif factor == 0.125:
180
+ return (7, 7)
181
+ else:
182
+ raise Exception('Unknown factor')
183
+
184
+ def read_rawpng(path, metadata):
185
+
186
+ raw = cv2.imread(str(path), cv2.IMREAD_UNCHANGED)
187
+
188
+ if raw.shape[0] == 4:
189
+ return raw * 959
190
+ raw = (raw.astype(np.float32) - 256.) / (4095.- 256.)
191
+
192
+ raw = bayer2raw(raw, metadata)
193
+ raw = np.clip(raw, 0., 1.)
194
+ return raw
195
+
196
+ def bayer2raw(raw, metadata):
197
+ # pack RGGB Bayer raw to 4 channels
198
+ H, W = raw.shape
199
+ raw = raw[None, ...]
200
+ if metadata['cfa_pattern'][0] == 0:
201
+ # RGGB
202
+ raw_pack = np.concatenate((raw[:, 0:H:2, 0:W:2],
203
+ raw[:, 0:H:2, 1:W:2],
204
+ raw[:, 1:H:2, 0:W:2],
205
+ raw[:, 1:H:2, 1:W:2]), axis=0)
206
+ else :
207
+ # BGGR
208
+ raw_pack = np.concatenate((raw[:, 1:H:2, 1:W:2],
209
+ raw[:, 0:H:2, 1:W:2],
210
+ raw[:, 1:H:2, 0:W:2],
211
+ raw[:, 0:H:2, 0:W:2]), axis=0)
212
+ return raw_pack
213
+
214
+ def raw_rggb_float32(raws):
215
+ # depack 4 channels raw to RGGB Bayer
216
+ C, H, W = raws.shape
217
+ output = np.zeros((H * 2, W * 2)).astype(np.float32)
218
+
219
+ output[0:2 * H:2, 0:2 * W:2] = raws[0:1, :, :]
220
+ output[0:2 * H:2, 1:2 * W:2] = raws[1:2, :, :]
221
+ output[1:2 * H:2, 0:2 * W:2] = raws[2:3, :, :]
222
+ output[1:2 * H:2, 1:2 * W:2] = raws[3:4, :, :]
223
+
224
+ return output
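Packing and depacking are exact inverses in the RGGB case; a minimal round-trip check (the metadata dict is a hypothetical stand-in with cfa_pattern[0] == 0):

import numpy as np

bayer = np.random.rand(8, 8).astype(np.float32)
packed = bayer2raw(bayer, {"cfa_pattern": [0, 1, 1, 2]})
assert np.allclose(raw_rggb_float32(packed), bayer)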
225
+
226
+ def json_read(pth):
227
+ with open(pth) as j:
228
+ data = json.load(j)
229
+ return data
230
+
231
+ def linear_insert_1color(img_dt, resize, fx=128, fy=128):
232
+ pos_0_0, pos_0_1, pos_1_1, pos_1_0, m, n = insert_linear_pos(img_dt=img_dt, resize=resize, x_scale=fx, y_scale=fy)
233
+ a = (pos_1_0 - pos_0_0)
234
+ b = (pos_0_1 - pos_0_0)
235
+ c = pos_1_1 + pos_0_0 - pos_1_0 - pos_0_1
236
+ return np.round(a * n + b * m + c * n * m + pos_0_0).astype(int)
237
+
238
+ def insert_linear_pos(img_dt, resize, x_scale=128, y_scale=128):
239
+ m_, n_ = img_dt.shape
240
+ # get the size of the new image
241
+ if resize is None:
242
+ n_new, m_new = np.round(x_scale * n_).astype(int), np.round(y_scale * m_).astype(int)
243
+ else:
244
+ n_new, m_new = resize
245
+
246
+ n_scale, m_scale = n_ / n_new, m_ / m_new # src_width/dst_width, src_height/dst_height
247
+ # Step 1: get the four neighbouring points for each target position
248
+ # 1-1 initialize target positions
249
+ m_indxs = np.repeat(np.arange(m_new), n_new).reshape(m_new, n_new)
250
+ n_indxs = np.array(list(range(n_new))*m_new).reshape(m_new, n_new)
251
+ # 1-2 map target positions to source-image coordinates
252
+ m_indxs_c = (m_indxs + 0.5 ) * m_scale - 0.5
253
+ n_indxs_c = (n_indxs + 0.5 ) * n_scale - 0.5
254
+ ### clamp values below zero to 0
255
+ m_indxs_c[np.where(m_indxs_c < 0)] = 0.0
256
+ n_indxs_c[np.where(n_indxs_c < 0)] = 0.0
257
+
258
+ # 1-3 get the corner coordinates of the enclosing square
259
+ m_indxs_c_down = m_indxs_c.astype(int)
260
+ n_indxs_c_down = n_indxs_c.astype(int)
261
+ m_indxs_c_up = m_indxs_c_down + 1
262
+ n_indxs_c_up = n_indxs_c_down + 1
263
+ ### correct overflow beyond the image border
264
+ m_max = m_ - 1
265
+ n_max = n_ - 1
266
+ m_indxs_c_up[np.where(m_indxs_c_up > m_max)] = m_max
267
+ n_indxs_c_up[np.where(n_indxs_c_up > n_max)] = n_max
268
+
269
+ # 1-4 sample the values at the four square corners
270
+ pos_0_0 = img_dt[m_indxs_c_down, n_indxs_c_down].astype(int)
271
+ pos_0_1 = img_dt[m_indxs_c_up, n_indxs_c_down].astype(int)
272
+ pos_1_1 = img_dt[m_indxs_c_up, n_indxs_c_up].astype(int)
273
+ pos_1_0 = img_dt[m_indxs_c_down, n_indxs_c_up].astype(int)
274
+ # 1-5 get the fractional (floating-point) offsets
275
+ m, n = np.modf(m_indxs_c)[0], np.modf(n_indxs_c)[0]
276
+ return pos_0_0, pos_0_1, pos_1_1, pos_1_0, m, n
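A hedged usage sketch for the two interpolation helpers above; note that resize is taken as (new_width, new_height) and the result is rounded to int.

import numpy as np

channel = (np.random.rand(32, 32) * 255).astype(np.uint8)  # hypothetical single channel
upscaled = linear_insert_1color(channel, resize=(64, 64))  # bilinear upscaling to 64x64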
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ expected_img_ext = '.jpg'
2
+ expected_landscape_img_height = 866
3
+ expected_landscape_img_width = 1300
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (320 Bytes). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (269 Bytes). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/arch_util.cpython-312.pyc ADDED
Binary file (26.8 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/color.cpython-312.pyc ADDED
Binary file (17.6 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/color.cpython-39.pyc ADDED
Binary file (9.81 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/csrnet_network.cpython-312.pyc ADDED
Binary file (4.95 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/csrnet_network.cpython-39.pyc ADDED
Binary file (2.52 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_data_formats.cpython-312.pyc ADDED
Binary file (1.4 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_data_formats.cpython-39.pyc ADDED
Binary file (1.03 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_utils.cpython-312.pyc ADDED
Binary file (10.1 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/exif_utils.cpython-39.pyc ADDED
Binary file (5.4 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/fs.cpython-312.pyc ADDED
Binary file (2.86 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/fs.cpython-39.pyc ADDED
Binary file (1.63 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/io.cpython-312.pyc ADDED
Binary file (3.09 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/io.cpython-39.pyc ADDED
Binary file (2.02 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/lut_network.cpython-312.pyc ADDED
Binary file (17.9 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/misc.cpython-312.pyc ADDED
Binary file (13.2 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/misc.cpython-39.pyc ADDED
Binary file (7.07 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/optim.cpython-312.pyc ADDED
Binary file (2.29 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/optim.cpython-39.pyc ADDED
Binary file (1.49 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline.cpython-312.pyc ADDED
Binary file (15 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline.cpython-39.pyc ADDED
Binary file (11.6 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_bm3d.cpython-312.pyc ADDED
Binary file (12.2 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_bm3d.cpython-39.pyc ADDED
Binary file (9.51 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_utils.cpython-312.pyc ADDED
Binary file (34 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/pipeline_utils.cpython-39.pyc ADDED
Binary file (20 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/__pycache__/refine_network.cpython-312.pyc ADDED
Binary file (22.5 kB). View file
 
IIR-Lab/ISP_pipeline/raw_prc_pipeline/arch_util.py ADDED
@@ -0,0 +1,626 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.init as init
4
+ import torch.nn.functional as F
5
+
6
+
7
+ def initialize_weights(net_l, scale=1):
8
+ if not isinstance(net_l, list):
9
+ net_l = [net_l]
10
+ for net in net_l:
11
+ for m in net.modules():
12
+ if isinstance(m, nn.Conv2d):
13
+ init.kaiming_normal_(m.weight, a=0, mode='fan_in')
14
+ m.weight.data *= scale # for residual block
15
+ if m.bias is not None:
16
+ m.bias.data.zero_()
17
+ elif isinstance(m, nn.Linear):
18
+ init.kaiming_normal_(m.weight, a=0, mode='fan_in')
19
+ m.weight.data *= scale
20
+ if m.bias is not None:
21
+ m.bias.data.zero_()
22
+ elif isinstance(m, nn.BatchNorm2d):
23
+ init.constant_(m.weight, 1)
24
+ init.constant_(m.bias.data, 0.0)
25
+
26
+
27
+ def make_layer(block, n_layers):
28
+ layers = []
29
+ for _ in range(n_layers):
30
+ layers.append(block())
31
+ return nn.Sequential(*layers)
32
+
33
+
34
+ class ResidualBlock_noBN(nn.Module):
35
+ '''Residual block w/o BN
36
+ ---Conv-ReLU-Conv-+-
37
+ |________________|
38
+ '''
39
+
40
+ def __init__(self, nf=64):
41
+ super(ResidualBlock_noBN, self).__init__()
42
+ self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
43
+ self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
44
+
45
+ # initialization
46
+ initialize_weights([self.conv1, self.conv2], 0.1)
47
+
48
+ def forward(self, x):
49
+ identity = x
50
+ out = F.relu(self.conv1(x), inplace=True)
51
+ out = self.conv2(out)
52
+ return identity + out
53
+
54
+
55
+ def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros'):
56
+ """Warp an image or feature map with optical flow
57
+ Args:
58
+ x (Tensor): size (N, C, H, W)
59
+ flow (Tensor): size (N, H, W, 2), normal value
60
+ interp_mode (str): 'nearest' or 'bilinear'
61
+ padding_mode (str): 'zeros' or 'border' or 'reflection'
62
+
63
+ Returns:
64
+ Tensor: warped image or feature map
65
+ """
66
+ assert x.size()[-2:] == flow.size()[1:3]
67
+ B, C, H, W = x.size()
68
+ # mesh grid
69
+ grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))
70
+ grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2
71
+ grid.requires_grad = False
72
+ grid = grid.type_as(x)
73
+ vgrid = grid + flow
74
+ # scale grid to [-1,1]
75
+ vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0
76
+ vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0
77
+ vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
78
+ output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode)
79
+ return output
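A hedged sanity check for flow_warp: with an all-zero flow the warp should be (near) the identity; exactness depends on grid_sample's align_corners convention in the installed torch version.

import torch

x = torch.rand(1, 3, 16, 16)
flow = torch.zeros(1, 16, 16, 2)
warped = flow_warp(x, flow)
print((warped - x).abs().max())  # expected to be small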
80
+
81
+ """
82
+ Copyright (c) 2022 Samsung Electronics Co., Ltd.
83
+
84
+ Author(s):
85
+ Luxi Zhao (lucy.zhao@samsung.com; lucyzhao.zlx@gmail.com)
86
+ Abdelrahman Abdelhamed (abdoukamel@gmail.com)
87
+
88
+ Licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0) License, (the "License");
89
+ you may not use this file except in compliance with the License.
90
+ You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc-sa/4.0
91
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
92
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
93
+ See the License for the specific language governing permissions and limitations under the License.
94
+ For conditions of distribution and use, see the accompanying LICENSE.md file.
95
+
96
+ """
97
+
98
+ import numpy as np
99
+ import torch
100
+ import torch.nn as nn
101
+ import torch.nn.functional as F
102
+ def utils_get_image_stats(image_shape, grid_size):
103
+ """
104
+ Information about the cropped image.
105
+ :return: grid size, tile size, sizes of the 4 margins, meshgrids.
106
+ """
107
+
108
+ grid_rows = grid_size[0]
109
+ grid_cols = grid_size[1]
110
+
111
+ residual_height = image_shape[0] % grid_rows
112
+ residual_width = image_shape[1] % grid_cols
113
+
114
+ tile_height = image_shape[0] // grid_rows
115
+ tile_width = image_shape[1] // grid_cols
116
+
117
+ margin_top = tile_height // 2
118
+ margin_left = tile_width // 2
119
+
120
+ margin_bot = tile_height + residual_height - margin_top
121
+ margin_right = tile_width + residual_width - margin_left
122
+
123
+ return tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right
124
+
125
+ def apply_ltm_lut(imgs, luts):
126
+
127
+ imgs = (imgs - .5) * 2.
128
+
129
+ grids = imgs.unsqueeze(0).unsqueeze(0)
130
+ luts = luts.unsqueeze(0)
131
+
132
+ outs = F.grid_sample(luts, grids,
133
+ mode='bilinear', padding_mode='border', align_corners=True)
134
+
135
+ return outs.squeeze(0).squeeze(1).permute(1,2,0)
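A hedged shape sketch for apply_ltm_lut: it expects an (h, w, 3) image in [0, 1] and a (3, m, m, m) 3-D LUT per tile, and returns an (h, w, 3) result via grid_sample.

import torch

imgs = torch.rand(16, 16, 3)
luts = torch.rand(3, 9, 9, 9)    # hypothetical 9x9x9 LUT, one volume per channel
out = apply_ltm_lut(imgs, luts)  # -> shape (16, 16, 3)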
136
+
137
+
138
+ def apply_ltm(image, tone_curve, num_curves):
139
+ """
140
+ Apply tone curve to an image (patch).
141
+ :param image: (h, w, 3) if num_curves == 3, else (h, w)
142
+ :param tone_curve: (num_curves, control_points)
143
+ :param num_curves: 3 for 1 curve per channel, 1 for 1 curve for all channels.
144
+ :return: tone-mapped image.
145
+ """
146
+
147
+ if image.shape[-1] == 3:
148
+ if type(image) == np.ndarray:
149
+ r = tone_curve[0][image[..., 0]]
150
+ g = tone_curve[1][image[..., 1]]
151
+ b = tone_curve[2][image[..., 2]]
152
+ new_image = np.stack((r, g, b), axis=-1)
153
+ else:
154
+ r = tone_curve[0][image[..., 0].reshape(-1).long()].reshape(image[..., 0].shape)
155
+ g = tone_curve[1][image[..., 1].reshape(-1).long()].reshape(image[..., 1].shape)
156
+ b = tone_curve[2][image[..., 2].reshape(-1).long()].reshape(image[..., 2].shape)
157
+ new_image = torch.stack((r, g, b), axis=-1)
158
+ # new_image = np.stack((r, g, b), axis=-1)
159
+ else:
160
+ new_image = tone_curve[0][image[..., 0].reshape(-1).long()].reshape(image[..., 0].shape).unsqueeze(dim=2)
161
+ #tone_curve[0][image[..., 0].reshape(-1).long()].reshape(image[..., 0].shape)
162
+
163
+ return new_image
164
+
165
+
166
+ def apply_gtm(image, tone_curve, num_curves):
167
+ """
168
+ Apply a single tone curve to an image.
169
+ :param image: (h, w, 3) if num_curves == 3, else (h, w)
170
+ :param tone_curve: (1, num_curves, control_points)
171
+ :param num_curves: 3 for 1 curve per channel, 1 for 1 curve for all channels.
172
+ :return: tone-mapped image.
173
+ """
174
+ tone_curve = tone_curve[0]
175
+ out = apply_ltm(image, tone_curve, num_curves)
176
+ return out
177
+
178
+
179
+ def apply_ltm_center(image, tone_curves, stats, num_curves):
180
+ """
181
+ Apply tone curves to the center region of an image.
182
+ :param image: the original image.
183
+ :param tone_curves: a list of all tone curves in row scan order.
184
+ :return: interpolated center region of an image.
185
+ """
186
+ grid_rows, grid_cols, tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right, meshgrids = stats
187
+ xs_tl, ys_tl, xs_br, ys_br = meshgrids['center']
188
+
189
+ if torch.cuda.is_available():
190
+ device = torch.device("cuda")
191
+ else:
192
+ device = torch.device("cpu")
193
+ xs_tl = xs_tl.to(device)
194
+ ys_tl = ys_tl.to(device)
195
+ xs_br = xs_br.to(device)
196
+ ys_br = ys_br.to(device)
197
+
198
+
199
+ # Get neighbourhoods
200
+ neighbourhoods = []
201
+ for y in range(margin_top, image.shape[0]-margin_bot, tile_height):
202
+ for x in range(margin_left, image.shape[1]-margin_right, tile_width):
203
+ neighbourhoods.append(image[y:y + tile_height, x:x + tile_width, :])
204
+
205
+ assert len(neighbourhoods) == (grid_rows-1) * (grid_cols-1)
206
+
207
+ # Get indices for all 4-tile neighbourhoods
208
+ tile_ids = []
209
+ for i in range(grid_rows - 1):
210
+ for j in range(grid_cols - 1):
211
+ start = i * grid_cols + j
212
+ tile_ids.append([start, start + 1, start + grid_cols, start + grid_cols + 1])
213
+
214
+ # Apply LTM and interpolate
215
+ new_ns = []
216
+ for i, n in enumerate(neighbourhoods):
217
+ n_tile_ids = tile_ids[i] # ids of the 4 tone curves (tiles) of the neighbourhood
218
+ # n_4versions = [apply_ltm(n, tone_curves[j], num_curves) for j in n_tile_ids] # tl, tr, bl, br
219
+ n_4versions = [apply_ltm_lut(n, tone_curves[j])for j in n_tile_ids]
220
+ out = ys_br * xs_br * n_4versions[0] + ys_br * xs_tl * n_4versions[1] + ys_tl * xs_br * n_4versions[2] + ys_tl * xs_tl * n_4versions[3]
221
+ out /= (tile_height-1) * (tile_width-1)
222
+
223
+ new_ns.append(out)
224
+
225
+ # Stack the interpolated neighbourhoods together
226
+ rows = []
227
+ for i in range(grid_rows - 1):
228
+ cols = [new_ns[i * (grid_cols - 1) + j] for j in range(grid_cols - 1)]
229
+ row = torch.cat(cols, dim=1)
230
+ rows.append(row)
231
+ out = torch.cat(rows, dim=0)
232
+ return out
233
+
234
+
235
+ def apply_ltm_border(image, tone_curves, stats, num_curves=3):
236
+ """
237
+ Apply tone curves to the border, not including corner areas.
238
+ :param image: the original image.
239
+ :param tone_curves: a list of all tone curves in row scan order.
240
+ :return: interpolated border regions of the image. In order of top, bottom, left, right.
241
+ """
242
+ grid_rows, grid_cols, tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right, meshgrids = stats
243
+ (top_xs_l, top_xs_r), (bot_xs_l, bot_xs_r), (left_ys_t, left_ys_b), (right_ys_t, right_ys_b) = meshgrids['border']
244
+
245
+ if torch.cuda.is_available():
246
+ device = torch.device("cuda")
247
+ else:
248
+ device = torch.device("cpu")
249
+
250
+ top_xs_l = top_xs_l.to(device)
251
+ top_xs_r = top_xs_r.to(device)
252
+ bot_xs_l = bot_xs_l.to(device)
253
+ bot_xs_r = bot_xs_r.to(device)
254
+
255
+ left_ys_t = left_ys_t.to(device)
256
+ left_ys_b = left_ys_b.to(device)
257
+ right_ys_t = right_ys_t.to(device)
258
+ right_ys_b = right_ys_b.to(device)
259
+ # top, bottom, left, right neighbourhoods to be interpolated
260
+ ntop = []
261
+ nbot = []
262
+ nleft = []
263
+ nright = []
264
+
265
+ for x in range(margin_left, image.shape[1] - margin_right, tile_width):
266
+ ntop.append(image[:margin_top, x:x + tile_width, :])
267
+ nbot.append(image[-margin_bot:, x:x + tile_width, :])
268
+
269
+ for y in range(margin_top, image.shape[0] - margin_bot, tile_height):
270
+ nleft.append(image[y:y + tile_height, :margin_left, :])
271
+ nright.append(image[y:y + tile_height, -margin_right:, :])
272
+
273
+ def apply_ltm_two_tiles(tc1, tc2, meshgrid1, meshgrid2, nbhd, interp_length, num_curves):
274
+ """
275
+ Apply tone curve to, and interpolate a two-tile neighbourhood, either horizontal or vertical
276
+ :param tc1: left / top tone curves
277
+ :param tc2: right / bottom tone curves
278
+ :param meshgrid1: left / top meshgrids (leftmost / topmost positions are 0)
279
+ :param meshgrid2: right / bottom meshgrids (rightmost / bottommost positions are 0)
280
+ :param nbhd: neighbourhood to interpolate
281
+ :param interp_length: normalizing factor of the meshgrid.
282
+ Example: if xs = np.meshgrid(np.arange(10)), then interp_length = 9
283
+ :return: interpolated neighbourhood
284
+ """
285
+
286
+ # new_nbhd1 = apply_ltm(nbhd, tc1, num_curves)
287
+ # new_nbhd2 = apply_ltm(nbhd, tc2, num_curves)
288
+
289
+ new_nbhd1 = apply_ltm_lut(nbhd, tc1)
290
+ new_nbhd2 = apply_ltm_lut(nbhd, tc2)
291
+
292
+ out = meshgrid1 * new_nbhd2 + meshgrid2 * new_nbhd1
293
+ out /= interp_length
294
+ return out
295
+
296
+ new_ntop = [apply_ltm_two_tiles(tone_curves[i], # left tone curve
297
+ tone_curves[i + 1], # right tone curve
298
+ top_xs_l, top_xs_r,
299
+ n, tile_width - 1, num_curves) for i, n in enumerate(ntop)]
300
+
301
+ new_nbot = [apply_ltm_two_tiles(tone_curves[(grid_rows - 1) * grid_cols + i], # left tone curve
302
+ tone_curves[(grid_rows - 1) * grid_cols + i + 1], # right tone curve
303
+ bot_xs_l, bot_xs_r,
304
+ n, tile_width - 1, num_curves) for i, n in enumerate(nbot)]
305
+
306
+ new_nleft = [apply_ltm_two_tiles(tone_curves[i * grid_cols], # top tone curve
307
+ tone_curves[(i + 1) * grid_cols], # bottom tone curve
308
+ left_ys_t, left_ys_b,
309
+ n, tile_height - 1, num_curves) for i, n in enumerate(nleft)]
310
+
311
+ new_nright = [apply_ltm_two_tiles(tone_curves[(i + 1) * grid_cols - 1], # top tone curve
312
+ tone_curves[(i + 2) * grid_cols - 1], # bottom tone curve
313
+ right_ys_t, right_ys_b,
314
+ n, tile_height - 1, num_curves) for i, n in enumerate(nright)]
315
+
316
+ new_ntop = torch.cat(new_ntop, dim=1)
317
+ new_nbot = torch.cat(new_nbot, dim=1)
318
+ new_nleft = torch.cat(new_nleft, dim=0)
319
+ new_nright = torch.cat(new_nright, dim=0)
320
+ return new_ntop, new_nbot, new_nleft, new_nright
321
+
322
+
323
+ def apply_ltm_corner(image, tone_curves, stats, num_curves=3):
324
+ """
325
+ tone_curves: a list of all tone curves in row scan order.
326
+ return: interpolated corner tiles in the order of top left, top right, bot left, bot right
327
+ """
328
+ grid_rows, grid_cols, tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right, _ = stats
329
+
330
+ corner_ids = [0, grid_cols - 1, -grid_rows, -1]
331
+ tl_tile = image[:margin_top, :margin_left]
332
+ tr_tile = image[:margin_top, -margin_right:]
333
+ bl_tile = image[-margin_bot:, :margin_left]
334
+ br_tile = image[-margin_bot:, -margin_right:]
335
+
336
+ corner_tiles = [tl_tile, tr_tile, bl_tile, br_tile]
337
+ corner_tcs = [tone_curves[i] for i in corner_ids] # tcs: (grid_size, num_curves, control_points)
338
+ #new_tiles = [apply_ltm(corner_tiles[i], corner_tcs[i], num_curves) for i in range(len(corner_tcs))]
339
+ new_tiles = [apply_ltm_lut(corner_tiles[i],corner_tcs[i]) for i in range(len(corner_tcs))]
340
+
341
+ return new_tiles[0], new_tiles[1], new_tiles[2], new_tiles[3]
342
+
343
+
344
+ # def get_meshgrids(height, width):
345
+ # """
346
+ # Get two meshgrids of size (height, width). One with top left corner being (0, 0),
347
+ # the other with bottom right corner being (0, 0).
348
+ # :return: top left xs, ys, bottom right xs, ys
349
+ # """
350
+ # xs, ys = np.meshgrid(np.arange(width), np.arange(height))
351
+ # newys, newxs = torch.meshgrid(torch.arange(height, dtype=torch.int32), torch.arange(width, dtype=torch.int32))
352
+ # # mesh grid for top left corner
353
+ # xs_tl = np.tile(np.abs(xs)[..., np.newaxis], 3) # [0, 1, 2, ..., tile_width-1]
354
+ # ys_tl = np.tile(np.abs(ys)[..., np.newaxis], 3)
355
+ # new_xs_tl = newxs[..., None].abs().repeat(1, 1, 3)
356
+ # new_ys_tl = newys[..., None].abs().repeat(1, 1, 3)
357
+ # # mesh grid for bottom right corner
358
+ # xs_br = np.tile(np.abs(xs - width + 1)[..., np.newaxis], 3) # [-(tile_width-1), ..., -2, -1, 0]
359
+ # ys_br = np.tile(np.abs(ys - height + 1)[..., np.newaxis], 3)
360
+
361
+ # new_xs_br = (newxs - width + 1).abs()[..., None].repeat(1, 1, 3)
362
+ # new_ys_br = (newys - width + 1).abs()[..., None].repeat(1, 1, 3)
363
+ # # return xs_tl, ys_tl, xs_br, ys_br
364
+ # return new_xs_tl, new_ys_tl, new_xs_br, new_ys_br
365
+ def get_meshgrids(height, width):
366
+ """
367
+ Get two meshgrids of size (height, width). One with top left corner being (0, 0),
368
+ the other with bottom right corner being (0, 0).
369
+ :return: top left xs, ys, bottom right xs, ys
370
+ """
371
+ xs, ys = np.meshgrid(np.arange(width), np.arange(height))
372
+ # mesh grid for top left corner
373
+ xs_tl = np.tile(np.abs(xs)[..., np.newaxis], 3) # [0, 1, 2, ..., tile_width-1]
374
+ ys_tl = np.tile(np.abs(ys)[..., np.newaxis], 3)
375
+ # mesh grid for bottom right corner
376
+ xs_br = np.tile(np.abs(xs - width + 1)[..., np.newaxis], 3) # [-(tile_width-1), ..., -2, -1, 0]
377
+ ys_br = np.tile(np.abs(ys - height + 1)[..., np.newaxis], 3)
378
+
379
+ return torch.tensor(xs_tl), torch.tensor(ys_tl), torch.tensor(xs_br), torch.tensor(ys_br)
380
+
381
+
382
+
383
+ def get_meshgrid_center(tile_height, tile_width):
384
+ return get_meshgrids(tile_height, tile_width)
385
+
386
+
387
+ def get_meshgrid_border(tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right):
388
+ """
389
+ :return: meshgrids for the 4 border regions, in the order of top, bottom, left, right
390
+ """
391
+ # top
392
+ top_xs_l, _, top_xs_r, _ = get_meshgrids(margin_top, tile_width)
393
+
394
+ # bottom
395
+ bot_xs_l, _, bot_xs_r, _ = get_meshgrids(margin_bot, tile_width)
396
+
397
+ # left
398
+ _, left_ys_t, _, left_ys_b = get_meshgrids(tile_height, margin_left)
399
+
400
+ # right
401
+ _, right_ys_t, _, right_ys_b = get_meshgrids(tile_height, margin_right)
402
+
403
+ return (top_xs_l, top_xs_r), (bot_xs_l, bot_xs_r), (left_ys_t, left_ys_b), (right_ys_t, right_ys_b)
404
+
405
+
406
+ def get_image_stats(image, grid_size):
407
+ """
408
+ Information about the cropped image.
409
+ :param image: the original image
410
+ :return: grid size, tile size, sizes of the 4 margins, meshgrids.
411
+ """
412
+
413
+ grid_rows = grid_size[0]
414
+ grid_cols = grid_size[1]
415
+
416
+ tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right = utils_get_image_stats(image.shape,
417
+ grid_size)
418
+
419
+ meshgrid_center = get_meshgrid_center(tile_height, tile_width)
420
+ meshgrid_border = get_meshgrid_border(tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right)
421
+
422
+ meshgrids = {
423
+ 'center': meshgrid_center,
424
+ 'border': meshgrid_border
425
+ }
426
+
427
+ return grid_rows, grid_cols, tile_height, tile_width, margin_top, margin_left, margin_bot, margin_right, meshgrids
428
+
429
+
430
+ # build an image tensor of shape 1 x 512 x 512 x 3 and a tone_curve tensor of shape 1 x 64 x 3 x 256, then debug only here
431
+ def do_interpolation_lut(image, tone_curves, grid_size, num_curves=3):
432
+ """
433
+ Perform tone mapping and interpolation on an image.
434
+ Center region: bilinear interpolation.
435
+ Border region: linear interpolation.
436
+ Corner region: no interpolation.
437
+ :param num_curves: 3 -> 1 curve for each R,G,B channel, 1 -> 1 curve for all channels
438
+ :param image: input int8
439
+ :param tone_curves: (grid_size, num_curves, control_points)
440
+ :param grid_size: (ncols, nrows)
441
+ :return: image: float32, between [0-1]
442
+ """
443
+ if grid_size[0] == 1 and grid_size[1] == 1:
444
+ return apply_gtm(image, tone_curves, num_curves).astype(np.float64)
445
+
446
+ # get image statistics
447
+ stats = get_image_stats(image, grid_size)
448
+
449
+
450
+
451
+ # Center area:
452
+ center = apply_ltm_center(image, tone_curves, stats, num_curves)
453
+
454
+ # Border area:
455
+ b_top, b_bot, b_left, b_right = apply_ltm_border(image, tone_curves, stats, num_curves)
456
+
457
+ # Corner area:
458
+ tlc, trc, blc, brc = apply_ltm_corner(image, tone_curves, stats, num_curves)
459
+
460
+ # stack the corners, borders, and center together
461
+ row_t = torch.cat([tlc, b_top, trc], dim=1)
462
+ row_c = torch.cat([b_left, center, b_right], dim=1)
463
+ row_b = torch.cat([blc, b_bot, brc], dim=1)
464
+ out = torch.cat([row_t, row_c, row_b], dim=0)
465
+
466
+ assert out.shape == image.shape
467
+
468
+ return out
469
+
470
+
471
+
472
+ class Model(nn.Module):
473
+ def __init__(self):
474
+ super(Model, self).__init__()
475
+ # self.conv1 = nn.Conv2d(3, 4, kernel_size=3, padding=1)
476
+ # self.pool1 = nn.MaxPool2d(2)
477
+
478
+ # self.conv2 = nn.Conv2d(4, 8, kernel_size=3, padding=1)
479
+ # self.pool2 = nn.MaxPool2d(2)
480
+
481
+ # self.conv3 = nn.Conv2d(8, 16, kernel_size=3, padding=1)
482
+ # self.pool3 = nn.MaxPool2d(2)
483
+
484
+ # self.conv4 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
485
+ # self.pool4 = nn.MaxPool2d(2)
486
+
487
+ # self.conv5 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
488
+ # self.pool5 = nn.MaxPool2d(2)
489
+
490
+ # self.conv6 = nn.Conv2d(64, 768, kernel_size=3, padding=1)
491
+ # self.pool6 = nn.MaxPool2d(2)
492
+
493
+ self.layer_1 = nn.Sequential(
494
+ nn.Conv2d(3, 4, kernel_size=3, padding=1),
495
+ nn.BatchNorm2d(4),
496
+ nn.ReLU(),
497
+ nn.MaxPool2d(2)
498
+ )
499
+ self.layer_2 = nn.Sequential(
500
+ nn.Conv2d(4, 8, kernel_size=3, padding=1),
501
+ nn.BatchNorm2d(8),
502
+ nn.ReLU(),
503
+ nn.MaxPool2d(2)
504
+ )
505
+ self.layer_3 = nn.Sequential(
506
+ nn.Conv2d(8, 16, kernel_size=3, padding=1),
507
+ nn.BatchNorm2d(16),
508
+ nn.ReLU(),
509
+ nn.MaxPool2d(2)
510
+ )
511
+ self.layer_4 = nn.Sequential(
512
+ nn.Conv2d(16, 32, kernel_size=3, padding=1),
513
+ nn.BatchNorm2d(32),
514
+ nn.ReLU(),
515
+ nn.MaxPool2d(2)
516
+ )
517
+ self.layer_5 = nn.Sequential(
518
+ nn.Conv2d(32, 64, kernel_size=3, padding=1),
519
+ nn.BatchNorm2d(64),
520
+ nn.ReLU(),
521
+ nn.MaxPool2d(2)
522
+ )
523
+ self.layer_6 = nn.Sequential(
524
+ nn.Conv2d(64, 768, kernel_size=3, padding=1),
525
+ nn.BatchNorm2d(768),
526
+ nn.Sigmoid(),
527
+ nn.MaxPool2d(2)
528
+ )
529
+
530
+
531
+ def forward(self, x):
532
+
533
+ '''
534
+
535
+ original = x
536
+ x = self.conv1(x)
537
+ x = self.pool1(x)
538
+
539
+ x = self.conv2(x)
540
+ x = self.pool2(x)
541
+
542
+ x = self.conv3(x)
543
+ x = self.pool3(x)
544
+
545
+ x = self.conv4(x)
546
+ x = self.pool4(x)
547
+
548
+ x = self.conv5(x)
549
+ x = self.pool5(x)
550
+
551
+ x = self.conv6(x)
552
+ x = self.pool6(x)
553
+ oldres = x
554
+ x = original
555
+ '''
556
+
557
+ x = self.layer_1(x)
558
+
559
+ x = self.layer_2(x)
560
+
561
+ x = self.layer_3(x)
562
+
563
+ x = self.layer_4(x)
564
+
565
+ x = self.layer_5(x)
566
+
567
+ x = self.layer_6(x)
568
+
569
+ x = x.reshape(x.shape[0], x.shape[2] * x.shape[3], 3, int(x.shape[1] / 3))
570
+ return x
571
+
572
+
573
+ def _lut_transform(imgs, luts):
574
+ # img (b, 3, h, w), lut (b, c, m, m, m)
575
+ if imgs.shape[1]==1:
576
+
577
+ # for gray-image pre-processing
578
+ luts = luts.expand(1,1,64,64,64)
579
+ # normalize pixel values
580
+ imgs = (imgs - .5) * 2.
581
+ grids = (imgs.unsqueeze(4)).repeat(1,1,1,1,3)
582
+ else:
583
+ # normalize pixel values
584
+ imgs = (imgs - .5) * 2.
585
+ # reshape img to grid of shape (b, 1, h, w, 3)
586
+ # imgs = imgs.permute(2,0,1).unsqueeze(dim=0)
587
+ # grids = imgs.permute(0, 2, 3, 1).unsqueeze(1)
588
+ grids = imgs.unsqueeze(0).unsqueeze(0)
589
+ luts = luts.unsqueeze(0)
590
+ # after gridsampling, output is of shape (b, c, 1, h, w)
591
+ outs = F.grid_sample(luts, grids,
592
+ mode='bilinear', padding_mode='border', align_corners=True)
593
+ return outs.squeeze(2)
594
+
595
+
596
+ if __name__ == '__main__':
597
+
598
+ import torch
599
+ import cv2
600
+
601
+ grid_size = [8,8]
602
+
603
+ np.random.seed(42)
604
+ rand_img = np.random.random((512, 512, 3))
605
+ luts_np = np.random.random((64, 3, 9))
606
+
607
+ img_torch = torch.tensor(rand_img, dtype=torch.float32).cuda()
608
+ luts_torch = torch.tensor(luts_np, dtype=torch.float32).cuda()
609
+
610
+
611
+ iluts = []
612
+ for i in range(luts_torch.shape[0]):
613
+ iluts.append(torch.stack(
614
+ torch.meshgrid(*(luts_torch[i].unbind(0)[::-1])),
615
+ dim=0).flip(0))
616
+ iluts = torch.stack(iluts, dim=0)
617
+
618
+
619
+ result = do_interpolation_lut(img_torch, iluts, grid_size)
620
+ print(result)
621
+
622
+
623
+
624
+
625
+
626
+
IIR-Lab/ISP_pipeline/raw_prc_pipeline/color.py ADDED
@@ -0,0 +1,306 @@
1
+ import numpy as np
2
+
3
+
4
+ def rgb2gray(data):
5
+ return 0.299 * data[:, :, 0] + \
6
+ 0.587 * data[:, :, 1] + \
7
+ 0.114 * data[:, :, 2]
8
+
9
+
10
+ def rgb2ycc(data, rule="bt601"):
11
+ # map to select kr and kb
12
+ kr_kb_dict = {"bt601": [0.299, 0.114],
13
+ "bt709": [0.2126, 0.0722],
14
+ "bt2020": [0.2627, 0.0593]}
15
+
16
+ kr = kr_kb_dict[rule][0]
17
+ kb = kr_kb_dict[rule][1]
18
+ kg = 1 - (kr + kb)
19
+
20
+ output = np.empty(np.shape(data), dtype=np.float32)
21
+ output[:, :, 0] = kr * data[:, :, 0] + \
22
+ kg * data[:, :, 1] + \
23
+ kb * data[:, :, 2]
24
+ output[:, :, 1] = 0.5 * ((data[:, :, 2] - output[:, :, 0]) / (1 - kb))
25
+ output[:, :, 2] = 0.5 * ((data[:, :, 0] - output[:, :, 0]) / (1 - kr))
26
+
27
+ return output
28
+
29
+
30
+ def ycc2rgb(data, rule="bt601"):
31
+ # map to select kr and kb
32
+ kr_kb_dict = {"bt601": [0.299, 0.114],
33
+ "bt709": [0.2126, 0.0722],
34
+ "bt2020": [0.2627, 0.0593]}
35
+
36
+ kr = kr_kb_dict[rule][0]
37
+ kb = kr_kb_dict[rule][1]
38
+ kg = 1 - (kr + kb)
39
+
40
+ output = np.empty(np.shape(data), dtype=np.float32)
41
+ output[:, :, 0] = 2. * data[:, :, 2] * (1 - kr) + data[:, :, 0]
42
+ output[:, :, 2] = 2. * data[:, :, 1] * (1 - kb) + data[:, :, 0]
43
+ output[:, :, 1] = (data[:, :, 0] - kr * output[:, :, 0] - kb * output[:, :, 2]) / kg
44
+
45
+ return output
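A hedged round-trip sketch: rgb2ycc followed by ycc2rgb should be close to the identity for any of the supported matrices (the residual is float32 rounding only).

import numpy as np

rgb = np.random.rand(8, 8, 3).astype(np.float32)
back = ycc2rgb(rgb2ycc(rgb, "bt601"), "bt601")
print(np.abs(back - rgb).max())  # expected to be ~1e-6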
46
+
47
+
48
+ def degamma_srgb(data, clip_range=[0, 65535]):
49
+ # bring data in range 0 to 1
50
+ data = np.clip(data, clip_range[0], clip_range[1])
51
+ data = np.divide(data, clip_range[1])
52
+
53
+ data = np.asarray(data)
54
+ mask = data > 0.04045
55
+
56
+ # basically, if data[x, y, c] > 0.04045, data[x, y, c] = ( (data[x, y, c] + 0.055) / 1.055 ) ^ 2.4
57
+ # else, data[x, y, c] = data[x, y, c] / 12.92
58
+ data[mask] += 0.055
59
+ data[mask] /= 1.055
60
+ data[mask] **= 2.4
61
+
62
+ data[np.invert(mask)] /= 12.92
63
+
64
+ # rescale
65
+ return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
66
+
67
+
68
+ def degamma_adobe_rgb_1998(data, clip_range=[0, 65535]):
69
+ # bring data in range 0 to 1
70
+ data = np.clip(data, clip_range[0], clip_range[1])
71
+ data = np.divide(data, clip_range[1])
72
+
73
+ data = np.power(data, 2.2) # originally raised to 2.19921875
74
+
75
+ # rescale
76
+ return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
77
+
78
+
79
+ def rgb2xyz(data, color_space="srgb", clip_range=[0, 255]):
80
+ # input rgb in range clip_range
81
+ # output xyz is in range 0 to 1
82
+ if color_space == "srgb":
83
+ # degamma / linearization
84
+ data = degamma_srgb(data, clip_range)
85
+ data = np.float32(data)
86
+ data = np.divide(data, clip_range[1])
87
+
88
+ # matrix multiplication
89
+ output = np.empty(np.shape(data), dtype=np.float32)
90
+ output[:, :, 0] = data[:, :, 0] * 0.4124 + data[:, :, 1] * 0.3576 + data[:, :, 2] * 0.1805
91
+ output[:, :, 1] = data[:, :, 0] * 0.2126 + data[:, :, 1] * 0.7152 + data[:, :, 2] * 0.0722
92
+ output[:, :, 2] = data[:, :, 0] * 0.0193 + data[:, :, 1] * 0.1192 + data[:, :, 2] * 0.9505
93
+ elif color_space == "adobe-rgb-1998":
94
+ # degamma / linearization
95
+ data = degamma_adobe_rgb_1998(data, clip_range)
96
+ data = np.float32(data)
97
+ data = np.divide(data, clip_range[1])
98
+
99
+ # matrix multiplication
100
+ output = np.empty(np.shape(data), dtype=np.float32)
101
+ output[:, :, 0] = data[:, :, 0] * 0.5767309 + data[:, :, 1] * 0.1855540 + data[:, :, 2] * 0.1881852
102
+ output[:, :, 1] = data[:, :, 0] * 0.2973769 + data[:, :, 1] * 0.6273491 + data[:, :, 2] * 0.0752741
103
+ output[:, :, 2] = data[:, :, 0] * 0.0270343 + data[:, :, 1] * 0.0706872 + data[:, :, 2] * 0.9911085
104
+ elif color_space == "linear":
105
+ # matrix multiplication
106
+ output = np.empty(np.shape(data), dtype=np.float32)
107
+ data = np.float32(data)
108
+ data = np.divide(data, clip_range[1])
109
+ output[:, :, 0] = data[:, :, 0] * 0.4124 + data[:, :, 1] * 0.3576 + data[:, :, 2] * 0.1805
110
+ output[:, :, 1] = data[:, :, 0] * 0.2126 + data[:, :, 1] * 0.7152 + data[:, :, 2] * 0.0722
111
+ output[:, :, 2] = data[:, :, 0] * 0.0193 + data[:, :, 1] * 0.1192 + data[:, :, 2] * 0.9505
112
+ else:
113
+ print("Warning! color_space must be srgb or adobe-rgb-1998.")
114
+ return
115
+
116
+ return output
117
+
118
+
119
+ def gamma_srgb(data, clip_range=[0, 65535]):
120
+ # bring data in range 0 to 1
121
+ data = np.clip(data, clip_range[0], clip_range[1])
122
+ data = np.divide(data, clip_range[1])
123
+
124
+ data = np.asarray(data)
125
+ mask = data > 0.0031308
126
+
127
+ # basically, if data[x, y, c] > 0.0031308, data[x, y, c] = 1.055 * ( var_R(i, j) ^ ( 1 / 2.4 ) ) - 0.055
128
+ # else, data[x, y, c] = data[x, y, c] * 12.92
129
+ data[mask] **= 0.4167
130
+ data[mask] *= 1.055
131
+ data[mask] -= 0.055
132
+
133
+ data[np.invert(mask)] *= 12.92
134
+
135
+ # rescale
136
+ return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
137
+
138
+
139
+ def gamma_adobe_rgb_1998(data, clip_range=[0, 65535]):
140
+ # bring data in range 0 to 1
141
+ data = np.clip(data, clip_range[0], clip_range[1])
142
+ data = np.divide(data, clip_range[1])
143
+
144
+ data = np.power(data, 0.4545)
145
+
146
+ # rescale
147
+ return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
148
+
149
+
150
+ def xyz2rgb(data, color_space="srgb", clip_range=[0, 255]):
+     # input xyz is in range 0 to 1
+     # output rgb is in clip_range
+
+     # allocate space for output
+     output = np.empty(np.shape(data), dtype=np.float32)
+
+     if color_space == "srgb":
+         # matrix multiplication
+         output[:, :, 0] = data[:, :, 0] * 3.2406 + data[:, :, 1] * -1.5372 + data[:, :, 2] * -0.4986
+         output[:, :, 1] = data[:, :, 0] * -0.9689 + data[:, :, 1] * 1.8758 + data[:, :, 2] * 0.0415
+         output[:, :, 2] = data[:, :, 0] * 0.0557 + data[:, :, 1] * -0.2040 + data[:, :, 2] * 1.0570
+
+         # apply gamma to restore the display nonlinearity
+         output = gamma_srgb(output * clip_range[1], clip_range)
+     elif color_space == "adobe-rgb-1998":
+         # matrix multiplication
+         output[:, :, 0] = data[:, :, 0] * 2.0413690 + data[:, :, 1] * -0.5649464 + data[:, :, 2] * -0.3446944
+         output[:, :, 1] = data[:, :, 0] * -0.9692660 + data[:, :, 1] * 1.8760108 + data[:, :, 2] * 0.0415560
+         output[:, :, 2] = data[:, :, 0] * 0.0134474 + data[:, :, 1] * -0.1183897 + data[:, :, 2] * 1.0154096
+
+         # apply gamma to restore the display nonlinearity
+         output = gamma_adobe_rgb_1998(output * clip_range[1], clip_range)
+     elif color_space == "linear":
+         # matrix multiplication (sRGB primaries, no gamma)
+         output[:, :, 0] = data[:, :, 0] * 3.2406 + data[:, :, 1] * -1.5372 + data[:, :, 2] * -0.4986
+         output[:, :, 1] = data[:, :, 0] * -0.9689 + data[:, :, 1] * 1.8758 + data[:, :, 2] * 0.0415
+         output[:, :, 2] = data[:, :, 0] * 0.0557 + data[:, :, 1] * -0.2040 + data[:, :, 2] * 1.0570
+
+         # no gamma; just rescale to clip_range
+         output = output * clip_range[1]
+     else:
+         print("Warning! color_space must be srgb, adobe-rgb-1998, or linear.")
+         return
+
+     return output
+
+
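A minimal usage sketch for xyz2rgb (the random input is hypothetical; real XYZ data would come from the demosaiced, color-corrected image):

    xyz = np.random.rand(4, 4, 3).astype(np.float32)  # XYZ normalized to [0, 1]
    srgb = xyz2rgb(xyz, color_space="srgb", clip_range=[0, 255])
    print(srgb.shape, float(srgb.min()), float(srgb.max()))  # clipped to [0, 255]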
+ def get_xyz_reference(cie_version="1931", illuminant="d65"):
+     # daylight keys use the same spelling ("d65", not "D65") in both tables
+     # so the default illuminant argument works for either CIE version
+     if cie_version == "1931":
+         xyz_reference_dictionary = {"A": [109.850, 100.0, 35.585],
+                                     "B": [99.0927, 100.0, 85.313],
+                                     "C": [98.074, 100.0, 118.232],
+                                     "d50": [96.422, 100.0, 82.521],
+                                     "d55": [95.682, 100.0, 92.149],
+                                     "d65": [95.047, 100.0, 108.883],
+                                     "d75": [94.972, 100.0, 122.638],
+                                     "E": [100.0, 100.0, 100.0],
+                                     "F1": [92.834, 100.0, 103.665],
+                                     "F2": [99.187, 100.0, 67.395],
+                                     "F3": [103.754, 100.0, 49.861],
+                                     "F4": [109.147, 100.0, 38.813],
+                                     "F5": [90.872, 100.0, 98.723],
+                                     "F6": [97.309, 100.0, 60.191],
+                                     "F7": [95.044, 100.0, 108.755],
+                                     "F8": [96.413, 100.0, 82.333],
+                                     "F9": [100.365, 100.0, 67.868],
+                                     "F10": [96.174, 100.0, 81.712],
+                                     "F11": [100.966, 100.0, 64.370],
+                                     "F12": [108.046, 100.0, 39.228]}
+     elif cie_version == "1964":
+         xyz_reference_dictionary = {"A": [111.144, 100.0, 35.200],
+                                     "B": [99.178, 100.0, 84.3493],
+                                     "C": [97.285, 100.0, 116.145],
+                                     "d50": [96.720, 100.0, 81.427],
+                                     "d55": [95.799, 100.0, 90.926],
+                                     "d65": [94.811, 100.0, 107.304],
+                                     "d75": [94.416, 100.0, 120.641],
+                                     "E": [100.0, 100.0, 100.0],
+                                     "F1": [94.791, 100.0, 103.191],
+                                     "F2": [103.280, 100.0, 69.026],
+                                     "F3": [108.968, 100.0, 51.965],
+                                     "F4": [114.961, 100.0, 40.963],
+                                     "F5": [93.369, 100.0, 98.636],
+                                     "F6": [102.148, 100.0, 62.074],
+                                     "F7": [95.792, 100.0, 107.687],
+                                     "F8": [97.115, 100.0, 81.135],
+                                     "F9": [102.116, 100.0, 67.826],
+                                     "F10": [99.001, 100.0, 83.134],
+                                     "F11": [103.866, 100.0, 65.627],
+                                     "F12": [111.428, 100.0, 40.353]}
+     else:
+         print("Warning! cie_version must be 1931 or 1964.")
+         return
+     return np.divide(xyz_reference_dictionary[illuminant], 100.0)
+
+
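For example, the D65 white point under the 1931 observer comes straight from the table above, scaled to Y = 1:

    wp = get_xyz_reference(cie_version="1931", illuminant="d65")
    print(wp)  # 0.95047, 1.0, 1.08883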
+ def xyz2lab(data, cie_version="1931", illuminant="d65"):
+     xyz_reference = get_xyz_reference(cie_version, illuminant)
+
+     # normalize by the reference white point (note: modifies `data` in place)
+     data[:, :, 0] = data[:, :, 0] / xyz_reference[0]
+     data[:, :, 1] = data[:, :, 1] / xyz_reference[1]
+     data[:, :, 2] = data[:, :, 2] / xyz_reference[2]
+
+     data = np.asarray(data)
+
+     # if data[x, y, c] > 0.008856, data[x, y, c] = data[x, y, c] ^ (1/3)
+     # else, data[x, y, c] = 7.787 * data[x, y, c] + 16/116
+     mask = data > 0.008856
+     data[mask] **= 1. / 3.
+     data[np.invert(mask)] *= 7.787
+     data[np.invert(mask)] += 16. / 116.
+
+     data = np.float32(data)
+     output = np.empty(np.shape(data), dtype=np.float32)
+     output[:, :, 0] = 116. * data[:, :, 1] - 16.
+     output[:, :, 1] = 500. * (data[:, :, 0] - data[:, :, 1])
+     output[:, :, 2] = 200. * (data[:, :, 1] - data[:, :, 2])
+
+     return output
+
+
+ def lab2xyz(data, cie_version="1931", illuminant="d65"):
+     output = np.empty(np.shape(data), dtype=np.float32)
+
+     output[:, :, 1] = (data[:, :, 0] + 16.) / 116.
+     output[:, :, 0] = (data[:, :, 1] / 500.) + output[:, :, 1]
+     output[:, :, 2] = output[:, :, 1] - (data[:, :, 2] / 200.)
+
+     # if output[x, y, c] > 0.008856, output[x, y, c] = output[x, y, c] ^ 3
+     # else, output[x, y, c] = (output[x, y, c] - 16/116) / 7.787
+     output = np.asarray(output)
+     mask = output > 0.008856
+     output[mask] **= 3.
+     output[np.invert(mask)] -= 16. / 116.
+     output[np.invert(mask)] /= 7.787
+
+     xyz_reference = get_xyz_reference(cie_version, illuminant)
+
+     # denormalize by the reference white point
+     output = np.float32(output)
+     output[:, :, 0] = output[:, :, 0] * xyz_reference[0]
+     output[:, :, 1] = output[:, :, 1] * xyz_reference[1]
+     output[:, :, 2] = output[:, :, 2] * xyz_reference[2]
+
+     return output
+
+
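A hedged round-trip check for the two functions above (the values are kept well above the 0.008856 knee so both directions take the cube / cube-root branch):

    xyz = (0.05 + 0.9 * np.random.rand(2, 2, 3)).astype(np.float32)
    lab = xyz2lab(xyz.copy())  # copy: xyz2lab normalizes its input in place
    xyz_back = lab2xyz(lab)
    print(float(np.max(np.abs(xyz - xyz_back))))  # small, up to float32 rounding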
+ def lab2lch(data):
+     output = np.empty(np.shape(data), dtype=np.float32)
+
+     output[:, :, 0] = data[:, :, 0]  # L transfers directly
+     output[:, :, 1] = np.power(np.power(data[:, :, 1], 2) + np.power(data[:, :, 2], 2), 0.5)  # chroma
+     output[:, :, 2] = np.arctan2(data[:, :, 2], data[:, :, 1]) * 180 / np.pi  # hue angle in degrees
+
+     return output
+
+
+ def lch2lab(data):
+     output = np.empty(np.shape(data), dtype=np.float32)
+
+     output[:, :, 0] = data[:, :, 0]  # L transfers directly
+     output[:, :, 1] = np.multiply(np.cos(data[:, :, 2] * np.pi / 180), data[:, :, 1])  # a = C * cos(h)
+     output[:, :, 2] = np.multiply(np.sin(data[:, :, 2] * np.pi / 180), data[:, :, 1])  # b = C * sin(h)
+
+     return output
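Since lab2lch and lch2lab are exact inverses, hue edits become a one-liner in LCH space. A sketch, assuming `lab` holds an L*a*b* image as produced by xyz2lab (the 30-degree shift is an arbitrary illustration):

    lch = lab2lch(lab)
    lch[:, :, 2] = (lch[:, :, 2] + 30.0) % 360.0  # rotate hue by 30 degrees
    lab_rotated = lch2lab(lch)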
IIR-Lab/ISP_pipeline/raw_prc_pipeline/csrnet_network.py ADDED
@@ -0,0 +1,76 @@
+ import torch
+ import torch.nn as nn
+
+
+ class Condition(nn.Module):
+     def __init__(self, in_nc=3, nf=32):
+         super(Condition, self).__init__()
+         stride = 2
+         pad = 0
+         self.pad = nn.ZeroPad2d(1)
+         self.conv1 = nn.Conv2d(in_nc, nf, 7, stride, pad, bias=True)
+         self.conv2 = nn.Conv2d(nf, nf, 3, stride, pad, bias=True)
+         self.conv3 = nn.Conv2d(nf, nf, 3, stride, pad, bias=True)
+         self.act = nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         conv1_out = self.act(self.conv1(self.pad(x)))
+         conv2_out = self.act(self.conv2(self.pad(conv1_out)))
+         conv3_out = self.act(self.conv3(self.pad(conv2_out)))
+         # global average pooling over the spatial dimensions -> (N, nf)
+         out = torch.mean(conv3_out, dim=[2, 3], keepdim=False)
+
+         return out
+
+
+ # 3 layers with control
+ class CSRNet(nn.Module):
+     def __init__(self, in_nc=3, out_nc=3, base_nf=48, cond_nf=24):
+         super(CSRNet, self).__init__()
+
+         self.base_nf = base_nf
+         self.out_nc = out_nc
+
+         self.cond_net = Condition(in_nc=in_nc, nf=cond_nf)
+
+         # per-layer affine modulation parameters predicted from the condition vector
+         self.cond_scale1 = nn.Linear(cond_nf, base_nf, bias=True)
+         self.cond_scale2 = nn.Linear(cond_nf, base_nf, bias=True)
+         self.cond_scale3 = nn.Linear(cond_nf, 3, bias=True)
+
+         self.cond_shift1 = nn.Linear(cond_nf, base_nf, bias=True)
+         self.cond_shift2 = nn.Linear(cond_nf, base_nf, bias=True)
+         self.cond_shift3 = nn.Linear(cond_nf, 3, bias=True)
+
+         # 1x1 convolutions: the base network acts pixel-wise
+         self.conv1 = nn.Conv2d(in_nc, base_nf, 1, 1, bias=True)
+         self.conv2 = nn.Conv2d(base_nf, base_nf, 1, 1, bias=True)
+         self.conv3 = nn.Conv2d(base_nf, out_nc, 1, 1, bias=True)
+
+         self.act = nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         cond = self.cond_net(x)
+
+         scale1 = self.cond_scale1(cond)
+         shift1 = self.cond_shift1(cond)
+
+         scale2 = self.cond_scale2(cond)
+         shift2 = self.cond_shift2(cond)
+
+         scale3 = self.cond_scale3(cond)
+         shift3 = self.cond_shift3(cond)
+
+         # each stage is a modulated residual: out * scale + shift + out
+         out = self.conv1(x)
+         out = out * scale1.view(-1, self.base_nf, 1, 1) + shift1.view(-1, self.base_nf, 1, 1) + out
+         out = self.act(out)
+
+         out = self.conv2(out)
+         out = out * scale2.view(-1, self.base_nf, 1, 1) + shift2.view(-1, self.base_nf, 1, 1) + out
+         out = self.act(out)
+
+         out = self.conv3(out)
+         out = out * scale3.view(-1, self.out_nc, 1, 1) + shift3.view(-1, self.out_nc, 1, 1) + out
+         return out
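A minimal smoke test of the network above (the 64x64 input is an arbitrary size; any spatial size large enough for the three strided condition convolutions works):

    import torch

    net = CSRNet()  # defaults: 3 -> 3 channels
    x = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # torch.Size([1, 3, 64, 64])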
IIR-Lab/ISP_pipeline/raw_prc_pipeline/exif_data_formats.py ADDED
@@ -0,0 +1,22 @@
+ class ExifFormat:
+     def __init__(self, id, name, size, short_name):
+         self.id = id
+         self.name = name
+         self.size = size
+         self.short_name = short_name  # format character used with struct.unpack()
+
+
+ exif_formats = {
+     1: ExifFormat(1, 'unsigned byte', 1, 'B'),
+     2: ExifFormat(2, 'ascii string', 1, 's'),
+     3: ExifFormat(3, 'unsigned short', 2, 'H'),
+     4: ExifFormat(4, 'unsigned long', 4, 'L'),
+     5: ExifFormat(5, 'unsigned rational', 8, ''),  # no struct code; stored as two unsigned longs
+     6: ExifFormat(6, 'signed byte', 1, 'b'),
+     7: ExifFormat(7, 'undefined', 1, 'B'),  # consider `undefined` as `unsigned byte`
+     8: ExifFormat(8, 'signed short', 2, 'h'),
+     9: ExifFormat(9, 'signed long', 4, 'l'),
+     10: ExifFormat(10, 'signed rational', 8, ''),  # no struct code; stored as two signed longs
+     11: ExifFormat(11, 'single float', 4, 'f'),
+     12: ExifFormat(12, 'double float', 8, 'd'),
+ }
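A short illustration of how the table is meant to be consumed (the raw bytes and little-endian byte order here are made up for the example):

    import struct

    fmt = exif_formats[3]  # unsigned short
    raw = b'\x01\x00'
    (value,) = struct.unpack('<' + fmt.short_name, raw[:fmt.size])
    print(fmt.name, value)  # unsigned short 1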