Translate

Saturday, December 26, 2015

OPM Quality Basic Queries

-- OPM Quality basic reference queries (Oracle EBS / GMD schema).
-- Each statement is now terminated with ';' so the file can run as a script.

-- Test classes available
SELECT test_class      AS class,
       test_class_desc AS description
FROM   gmd_test_classes;

SELECT * FROM gmd_test_classes;      -- all columns, for reference

-- Test methods
SELECT * FROM gmd_test_methods;

SELECT test_method_code  AS method,
       test_method_desc  AS description,
       test_qty          AS qty,
       test_qty_uom      AS uom,
       display_precision AS stored_precision,
       days,
       hours,
       minutes,
       seconds,
       test_replicate    AS replicate
FROM   gmd_test_methods;

-- Test units
SELECT * FROM gmd_units;

SELECT qcunit_code AS units,
       qcunit_desc AS description
FROM   gmd_units;

-- Action codes
SELECT * FROM mtl_actions;

SELECT action_code AS action,
       description
FROM   mtl_actions;

-- Process Quality parameters (per-organization sample numbering)
SELECT organization_id,
       sample_last_assigned,
       ss_last_assigned
FROM   gmd_quality_config;

SELECT * FROM gmd_quality_config;

-- Sampling plans: how many samples to take, how often, and how much to
-- reserve/archive. DECODEs replaced with ANSI CASE expressions.
SELECT sampling_plan_name AS plan_name,
       sampling_plan_desc AS description,
       sample_cnt_req     AS count,
       sample_qty         AS quantity,
       sample_qty_uom     AS uom,
       CASE frequency_type
         WHEN 'F' THEN 'Fixed Number'
       END                AS frequency,
       frequency_cnt      AS per,
       -- NOTE(review): despite the alias, this is the frequency period
       -- (per batch step / receipt / batch), not a unit of measure.
       CASE frequency_per
         WHEN 'FS' THEN 'Batch step'
         WHEN 'FR' THEN 'Receipt'
         WHEN 'FB' THEN 'Batch'
       END                AS sample_uom,
       reserve_cnt_req    AS res_count,
       reserve_qty        AS res_qty,
       archive_cnt_req    AS arch_count,
       archive_qty        AS arch_qty
FROM   gmd_sampling_plans;

SELECT * FROM gmd_sampling_plans;




-- Test definition for a single test code
SELECT * FROM gmd_qc_tests WHERE test_code = '% BTM';

-- Test data: each test with its method and its value-list rows.
-- Rewritten from implicit comma joins to ANSI joins; DECODE -> CASE;
-- priority label '8H' normalized to 'High' to match the sibling queries.
-- NOTE(review): the join to gmd_qc_test_values is an INNER join, so tests
-- with no value rows are dropped; use a LEFT JOIN if they must appear.
SELECT te.test_code                    AS test,
       te.test_desc                    AS description,
       te.test_class                   AS class,
       me.test_method_code             AS method,
       me.test_method_desc             AS method_description,
       CASE te.test_type
         WHEN 'L' THEN 'Numeric range With Display Text'
         WHEN 'E' THEN 'Expression'
         WHEN 'N' THEN 'Numeric Range'
         WHEN 'T' THEN 'Text Range'
         WHEN 'U' THEN 'Non-Validated'
         WHEN 'V' THEN 'List of Test Values'
       END                             AS data_type,
       te.test_unit                    AS unit,
       CASE te.priority
         WHEN '1L' THEN 'Low'
         WHEN '5N' THEN 'Normal'
         WHEN '8H' THEN 'High'
       END                             AS priority,
       te.min_value_num                AS range_from,
       te.max_value_num                AS range_to,
       te.display_precision            AS stored_precision,
       te.report_precision             AS report_precision,
       te.expression,
       tv.min_num                      AS min_value,
       tv.max_num                      AS max_value,
       tv.display_label_numeric_range  AS display
FROM   gmd_qc_tests te
       INNER JOIN gmd_test_methods me
               ON me.test_method_id = te.test_method_id
       INNER JOIN gmd_qc_test_values tv
               ON tv.test_id = te.test_id;

-- Values in test data samples
SELECT * FROM gmd_qc_test_values;


-- Process Quality specification lookup for a single spec
SELECT * FROM gmd_specifications WHERE spec_name = '726 Sell / 200 Kg';

-- Product Quality specifications with their test targets.
-- Rewritten from implicit comma joins to ANSI joins. The two correlated
-- scalar subqueries against GMD_TEST_METHODS used the same key, so they
-- are consolidated into a single LEFT JOIN; LEFT JOINs preserve the
-- original scalar-subquery behavior (NULL when no match, row kept).
-- The dead commented-out predicate on gct.test_id has been removed.
SELECT gmsp.spec_name             AS spec,
       gmsp.spec_desc             AS description,
       gmsp.spec_vers             AS version,
       gms.description            AS status,
       CASE gmsp.spec_type
         WHEN 'I' THEN 'Item'
         WHEN 'M' THEN 'Monitoring'
       END                        AS spec_type,
       gmsp.overlay_ind           AS overlay,
       gmsp.base_spec_id          AS base_spec,
       gmsp.inventory_item_id     AS item_id,
       kfiv.concatenated_segments AS item_code,
       kfiv.description           AS item_desc,
       gmsp.grade_code            AS grade,
       gmsp.owner_organization_id AS owner_organization,
       fnu.user_name,
       gst.seq                    AS target_seq,
       gct.test_class             AS target_class,
       gtm.test_method_code       AS target_test_method,
       gtm.test_method_desc       AS target_test_method_desc,
       gst.min_value_num          AS target_minimum,
       gst.max_value_num          AS target_maximum,
       gst.test_qty               AS target_quantity,
       gst.test_qty_uom           AS target_uom,
       gst.test_replicate         AS target_replicate,
       CASE gst.test_priority
         WHEN '1L' THEN 'Low'
         WHEN '5N' THEN 'Normal'
         WHEN '8H' THEN 'High'
       END                        AS target_priority,
       gst.optional_ind           AS target_optional,
       gst.display_precision      AS formating_stored_precision,
       gst.report_precision       AS report_stored_precision
FROM   gmd_specifications gmsp
       INNER JOIN gmd_status gms
               ON gms.status_code = gmsp.spec_status
       INNER JOIN gmd_spec_tests gst
               ON gst.spec_id = gmsp.spec_id
       INNER JOIN mtl_system_items_kfv kfiv
               ON kfiv.inventory_item_id = gmsp.inventory_item_id
              AND kfiv.organization_id   = gmsp.owner_organization_id
       INNER JOIN fnd_user fnu
               ON fnu.user_id = gmsp.owner_id
       LEFT JOIN gmd_qc_tests gct
              ON gct.test_id = gst.test_id
       LEFT JOIN gmd_test_methods gtm
              ON gtm.test_method_id = gst.test_method_id;



-- Product Quality specifications with test targets AND validity rules.
-- Same rewrite as the previous query (ANSI joins, DECODE -> CASE, scalar
-- subqueries folded into LEFT JOINs). The duplicate join predicate
-- "ver.spec_id = gst.spec_id" was redundant (implied transitively) and
-- has been dropped.
-- NOTE(review): joining spec tests and validity rules in one statement
-- yields one row per (target, validity rule) combination per spec.
SELECT gmsp.spec_name                  AS spec,
       gmsp.spec_desc                  AS description,
       gmsp.spec_vers                  AS version,
       gms.description                 AS status,
       CASE gmsp.spec_type
         WHEN 'I' THEN 'Item'
         WHEN 'M' THEN 'Monitoring'
       END                             AS spec_type,
       gmsp.overlay_ind                AS overlay,
       gmsp.base_spec_id               AS base_spec,
       gmsp.inventory_item_id          AS item_id,
       kfiv.concatenated_segments      AS item_code,
       kfiv.description                AS item_desc,
       gmsp.grade_code                 AS grade,
       gmsp.owner_organization_id      AS owner_organization,
       fnu.user_name,
       gst.seq                         AS target_seq,
       gct.test_class                  AS target_class,
       gtm.test_method_code            AS target_test_method,
       gtm.test_method_desc            AS target_test_method_desc,
       gst.min_value_num               AS target_minimum,
       gst.max_value_num               AS target_maximum,
       gst.test_qty                    AS target_quantity,
       gst.test_qty_uom                AS target_uom,
       gst.test_replicate              AS target_replicate,
       CASE gst.test_priority
         WHEN '1L' THEN 'Low'
         WHEN '5N' THEN 'Normal'
         WHEN '8H' THEN 'High'
       END                             AS target_priority,
       gst.optional_ind                AS target_optional,
       gst.display_precision           AS formating_stored_precision,
       gst.report_precision            AS report_stored_precision,
       ver.organization_code           AS validity_org_code,
       CASE ver.spec_type
         WHEN 'I' THEN 'Inventory'
         WHEN 'W' THEN 'WIP'
         WHEN 'C' THEN 'Customer'
         WHEN 'S' THEN 'Supplier'
       END                             AS validity_spec_type,
       CASE ver.delete_mark
         WHEN '0' THEN 'No'
         WHEN '1' THEN 'Yes'
       END                             AS validity_deleted,
       ver.spec_vr_status_desc         AS validity_spec_rule,
       ver.start_date                  AS validity_start_date,
       ver.end_date                    AS validity_end_date,
       ver.parent_lot_number           AS validity_parent_lot,
       ver.lot_number                  AS validity_lot_number,
       ver.subinventory                AS validity_subinventory,
       ver.in_spec_lot_status_code     AS validity_lot_status,
       ver.out_of_spec_lot_status_code AS validity_out_of_spec_lot
FROM   gmd_specifications gmsp
       INNER JOIN gmd_status gms
               ON gms.status_code = gmsp.spec_status
       INNER JOIN gmd_spec_tests gst
               ON gst.spec_id = gmsp.spec_id
       INNER JOIN gmd_all_spec_vrs_vl ver
               ON ver.spec_id = gmsp.spec_id
       INNER JOIN mtl_system_items_kfv kfiv
               ON kfiv.inventory_item_id = gmsp.inventory_item_id
              AND kfiv.organization_id   = gmsp.owner_organization_id
       INNER JOIN fnd_user fnu
               ON fnu.user_id = gmsp.owner_id
       LEFT JOIN gmd_qc_tests gct
              ON gct.test_id = gst.test_id
       LEFT JOIN gmd_test_methods gtm
              ON gtm.test_method_id = gst.test_method_id;

Steps involved in Implementation

HI All,

I would like to consolidate a list of best practices and common mistakes made in implementation projects.
I think you people might have come across different best practices and some amazing workarounds.
I request all of you to provide your valuable inputs so we can put together a road map for an ideal implementation.
From now onwards I will be sending different best practices and processes so that we can evaluate the pros and cons of each.

First of all, let me list the different technologies and tools required at different stages of implementation.
From now onwards we can evaluate each technology and tool for different types of situations.

Data loading stage:

1.sql loader
2.GUI tools like DATA loader
3.Oracle External Tables(from 9i)
4.WebADI
User interface Development:
1.Forms6i
2.OAF(Jdeveloper) Reporting Tools:
1.Reports Builder
2.XML Publisher
3.PL/SQL Reports
4.Oracle Discoverer
Business logic implementation:
1.Work Flow
2.AME(Approval's Management) Outbound interfaces:
1.EDI
2.XML Gate way
3.Custom made outbound interfaces using PL/Sql
Inbound Interfaces:
1.Open Interfaces
2.API’s
3.EDI
4.XML Gateway(Not Sure)
Others:
1.Oracle Alerts
2.OAF Personalization
3.Forms Personalization
Tools:
1.TOAD
2.SQL Client
3.PL/SQL Developer

Code Formatting Tools:
1.Formatter Plus
2.PL/SQL Developer Formatter Configuration Tools:
1.VSS
2.CVS


===========================================================

1. keep the source of all Customized objects(Tables,Packages,Procedures ..) in the PLS folder in the custom top.
It is easy to say but making people to follow it is very difficult(Even i don't :-) ).One way to make people to follow is by dropping objects whose lastest source is not present in the UNIX box.
we need to compare the last update date in the UNIX box and last_modified date in the all_objects...i witnessed this idea really working..This also removes the Cost factor of maintaining a VSS(VSS might have its own Advantages) also...

2.All Customized objects should be labeled.Example XXLGEOM2309.and the name of the object should be decided prior to the development
Lets say for example u have a Custom process involving a concurrent program,a form and a workflow.
For each object if u have one lablel right at the design phase all the documets can have a process design showing different components getting involved at differnt stages in the process.

3.A Repository of all the process and all the objects associated with it should be maintained.Thats what MD70 should contain...But most of the times it wont..they are always some missing objects.
we will realize the importance of it when the people starts leaving the company during implemenataion..

Functional:

1.Its always better to identify one process owner From the customer side for each process or Area.
Even though it is not with in our control the management should insist for it from the starting because it clears a lot of hurdles during implementation

2.All Functional people should have a fair idea of all the technologies used . they need not be experts but aleast they should understand the usages of it,its advantages&limitations .
(Guys with a little idea straightaway dont start urself giving techincal solutions..use the knowlege to understand what's happening..in future after enought experience u may....)
This will help u a lot when u r designing solutions...

Technical:
Let us discuss the reports..

1.The most common custom objects we see in any normal implementation (10-20% customization) are reports.
I personally feel the layout designing of the report should be the in the last stage of the implementation.Because the 90% of the Customer concern will be the data that he will get to analyze and 10% will be the Format it is presented.
so it is better to show the user the raw data every time using the tools like toad.Once user is happy with the data just have the sql and keep it aside.This gives us the flexibility of adding a column or removing it during the process development
Because adding a column or removing a column in tools like report builder takes a considerable amount of time.
It doesn't remove total rework but alteast reduces it considerably..

First let me list different types of reports from customer perspective..

1.Reports Presented to Authorities and top management--Look & Feel and the layout are important
2.Reports used for analyzation purpose and day-to-day operatons--like daily Shipping details report and Shipping lead time)
3.Reports Used for Record purpose--Keeping the data aside
4.MIS Reporting--Summary reports ,Report used to tally data from differnt applications in apps--Perfomance is pretty important as they run on huge data .

Let me list different reporting tools and some of their advantages


Report Builder:
Advantages:
Good for Complex layouts involving images and for Pre -Printed Document
Developer will have more control on the layout
Many Technical People knows very much how to use it
Disadvantages:
Report building time is Relatively High
Handling small layout changes is even more difficult
Data & Layout are combined into one unit--If I want to see same data in 3 different layouts we have to design three reports

PL/SQL Reports
Advantages
Easy to Build and maintain
Just PL/sql Knowledge is Enough
Should be used for analyzation reports and day-to day operation Reports
Disadvantage
Can handle only very simple layouts

XML Publisher
Advantages
Replacement for Report Builder
Can handle Complex layout also
Layout Design can be done in Microsoftword,XSLT --Compartively Easy
We can have one report with different layouts and layout can be selected at run time
I heard even we can get single report in multiple layout by running it only one time--Not sure as I have not used it
Disadvantages:
Only available in after 11.5.9(with a patch),11.5.10 onwards
Requires sound knowledge of microsoft word tables and word feautures
Many people still don’t have much idea on it..
Sometimes the output comes out skewed, and hands-on experience is required to find workarounds--(my own experience)
Discoverer Reports
Advantages:
Multi Dimension analysis
Easy to build and use
Disadvantages:
Separate License
No formatting options at all..can be used only for analysis not for reporting 

=======================================================================

HI All,
Here i try to analyze different Data loading methods available...and some other debatable issues..
Best Practices: 
1.Always have the rightly balanced teams.
A typical ratio for medium complexity process would be(1:1:2 )(Functional:Technical Designer:Developers).
Techical designer should be a guy who understands both the importance of business and technical limitations.
And the rule should be , technical designer should always be involved at the Functional Discussions with the customer.
This always help for not committing things which are not technically feasible..(Nothing is impossible ..but the effort needed to make it happen is worth or not??)

2.The other big Question is how should I divide my project team???
Method A>Functional,Technical( With in technical people by technologies they are good at like ..reports ,Forms & OAF ...)
Method B> One Process one team.(Atleast team size should be more than 20 to implement this)

There are advantages & disadvantages of both the ways..I am part of implementations which are done both ways....Lets me list some of them...
Method A: 
Advantages: Less time is taken for development of components..(As technical people are doing what they are good at..)
Disadvantages:
1.I don't know whether I am correct or not in quoting this but the truth is you will find a clear divide between the two teams..And once things start going wrong people start blaming each other...I think every one might have experienced it already..
2.For technical people it is even bigger loss because you work on different components belonging to different process and never understand how your component fits in the overall solution. At the end of day it will be just a
piece of code...
3.Any change in the process during the course of development is very difficult to handle, as there will be inter dependencies among components

Method B: 
Advantages:
1.Every one will feel the ownership of the process and better team spirit.
2.Technical team will also have better understanding of the processes and will able to implement the changes faster (as In a development project change is unavoidable ) as they have all the components with them

Disadvantages
1.Development time might be a bit long as the technical people in u r team might not have expertise in all the technologies involved in the process

My views might be a bit biased as I am strong supporter for method B..

Technical:Coming to our today's technical discussion ...data loading..This will be one of the first steps(development) and the last step(before live) of a implementation..
The typical way of data loading is a three step process.
1.To load data from the legacy/Flat files to temporary tables.
2..Perform all the validations on the temporary table data and load it to the open interfaces or API's
3.Run the Open interface Concurrent programs/API's to load data to the standard tables.

First let me figure ways of data movement in oracle apps..
1.Open Interfaces-- used for live interfaces(every day activity) & one time loading
2.API's--Almost same as Open interfaces but are easy to handle.(validation & integrity logic is taken care by them)
3.Out Bound Interfaces--Required if we integrate oracle apps with third party or legacy systems
4.EDI--automation process..we will talk about them later

For loading data
SQL Loader:1.Used when data is available in flat files(tab delimited,Comma delimited)
2.Faster & Easier way of loading data
3.Can use sql functions to modify data

DB Links: if the legacy systems is on oracle data base the best thing is to get access to the data customer want to import through db links

Data loader tool : These are third party tools used for loading ,which automates the process of punching data.There are very much user friendly and not much technical expertise is required to work with them.
But the biggest disadvantage of these tools is they are slow.If you have huge data it is not advisable to use them(unless u r patient enough to see thru :-) )
The data loaded will be valid because it is as good as manual punching..

XML: Oracle Provides Apis to import XML data and to export data into XML.This should be most convenient way for data interaction as most of the other technology systems are able to parse XML data easily.
There are some limitations(can be easily overcome) also for these in oracle like while importing XML data into oracle tables oracle can't parse huge files

WEBADI: These are oracle provided templates for data loading into some standard interfaces.Easy to use. we can create the custom api's and use for data loading..i felt this is one of the best ways of loading data

UTL_FILE: it is PL/SQL way of loading data into oracle tables.This packages gives us the API to read and data into flat files.This method is very useful when the data to be loaded is less and more validations are required
before loading.One of the limitations of this pacakge it reads data in lines and the maximum length it can read is 1022 charecters..In writing data to files it can write only 32K in one shot..later we need to close the file
and reopen it again..

External Table: This concept exists from Oracle 9i onwards. It is one of the easiest ways of loading data. Once you create the external table, you can simply use a SELECT statement to query it.
On performance basis this is as good as Direct path loading of SQLLDR.(Technical People Give a try for this...)

Caution:Disable any indexes on the table before loading data..other wise it will slow down the process of loading data.

On summary for all conversions(one time data movement) use External Tables or SQL Loader
For interfaces use PL/sql,dblinks or XML
HI all i have not used SQL loader much..most of the time i have used External tables ,UTL_FIle and XML.So people who has much exposure can come up with any limitations or advantages of it..
Plz let me know if any thing is wrong or any other suggestions to make this better....

P2P & O2C Technical Flow

                                 P2P Backend FLOW
-- ================== P2P backend flow ==================
-- NOTE(review): SEGMENT1, RECEIPT_NUM and INVOICE_NUM are VARCHAR2
-- columns — compare them against string literals; the original mixed
-- quoted and unquoted values, forcing implicit conversion on some
-- predicates (which can disable index use).

-- Requisition (indent)
SELECT * FROM po_requisition_headers_all WHERE segment1 = '5671';

SELECT *
FROM   po_requisition_lines_all
WHERE  requisition_header_id = (SELECT requisition_header_id
                                FROM   po_requisition_headers_all
                                WHERE  segment1 = '5671');

-- PO related
SELECT * FROM po_headers_all WHERE segment1 = '4500';

SELECT *
FROM   po_lines_all
WHERE  po_header_id = (SELECT po_header_id
                       FROM   po_headers_all
                       WHERE  segment1 = '4500');

SELECT *
FROM   po_line_locations_all
WHERE  po_header_id = (SELECT po_header_id
                       FROM   po_headers_all
                       WHERE  segment1 = '4500');

-- (the stray trailing '.' on the original statement was a syntax error)
SELECT *
FROM   po_distributions_all
WHERE  po_header_id = (SELECT po_header_id
                       FROM   po_headers_all
                       WHERE  segment1 = '4500');

-- Receipt information
SELECT * FROM rcv_shipment_headers WHERE receipt_num = '9621';

SELECT *
FROM   rcv_shipment_lines
WHERE  shipment_header_id IN (SELECT shipment_header_id
                              FROM   rcv_shipment_headers
                              WHERE  receipt_num = '9621'
                              AND    shipment_header_id = 69428);

SELECT *
FROM   rcv_transactions
WHERE  po_header_id IN (SELECT po_header_id
                        FROM   po_headers_all
                        WHERE  segment1 = '4500');

SELECT *
FROM   mtl_material_transactions
WHERE  rcv_transaction_id IN (SELECT transaction_id
                              FROM   rcv_transactions
                              WHERE  po_header_id IN (SELECT po_header_id
                                                      FROM   po_headers_all
                                                      WHERE  segment1 = '4500'));

-- AP invoice related
SELECT * FROM ap_invoices_all WHERE invoice_num = '4579';

SELECT *
FROM   ap_invoice_distributions_all
WHERE  invoice_id IN (SELECT invoice_id
                      FROM   ap_invoices_all
                      WHERE  invoice_num = '4579');

-- Payment related
SELECT *
FROM   ap_payment_schedules_all
WHERE  invoice_id IN (SELECT invoice_id
                      FROM   ap_invoices_all
                      WHERE  invoice_num = '4579');

SELECT *
FROM   ap_invoice_payments_all
WHERE  invoice_id IN (SELECT invoice_id
                      FROM   ap_invoices_all
                      WHERE  invoice_num = '4579');

SELECT *
FROM   ap_payment_distributions_all
WHERE  invoice_payment_id IN (SELECT invoice_payment_id
                              FROM   ap_invoice_payments_all
                              WHERE  invoice_id IN (SELECT invoice_id
                                                    FROM   ap_invoices_all
                                                    WHERE  invoice_num = '4579'));

-- The check row carries the vendor_id and that vendor's bank account.
SELECT * FROM ap_checks_all WHERE check_id = 28698;

-- Bank reconciliation in Cash Management
SELECT statement_header_id
FROM   ce_statement_headers_all
WHERE  statement_header_id = 75826;

SELECT *
FROM   ce_statement_lines
WHERE  statement_header_id IN (SELECT statement_header_id
                               FROM   ce_statement_headers_all
                               WHERE  statement_header_id = 75826);

-- GL related: after the invoices are transferred to GL by running the
-- "Payables Transfer to General Ledger" program, the affected tables are:
--   GL_JE_BATCHES
--   GL_JE_HEADERS
--   GL_JE_LINES
                                           O2C FLOW
-- ================== O2C flow ==================

-- Order related
SELECT * FROM oe_order_headers_all WHERE order_number = 57803;

SELECT *
FROM   oe_order_lines_all
WHERE  header_id = (SELECT header_id
                    FROM   oe_order_headers_all
                    WHERE  order_number = 57803);

-- Once the order is booked, pick release is the next step
SELECT *
FROM   wsh_delivery_details
WHERE  source_header_id = (SELECT header_id
                           FROM   oe_order_headers_all
                           WHERE  order_number = 57803);

-- Once the order is pick confirmed.
-- NOTE(review): an order usually has several delivery details, so the
-- original "= (subquery)" would raise ORA-01427; use IN instead.
SELECT delivery_id
FROM   wsh_delivery_assignments
WHERE  delivery_detail_id IN (SELECT delivery_detail_id
                              FROM   wsh_delivery_details
                              WHERE  source_header_id = (SELECT header_id
                                                         FROM   oe_order_headers_all
                                                         WHERE  order_number = 57803));

SELECT * FROM wsh_new_deliveries WHERE delivery_id = 211904;

SELECT * FROM mtl_material_transactions WHERE transaction_id IN (11262482, 11262483);

-- Once the move order is transacted
SELECT header_id FROM mtl_txn_request_headers WHERE request_number = '187252';

SELECT * FROM mtl_txn_request_lines WHERE header_id = 209289;

-- Once the order is ship confirmed, the Interface Trip Stop program is
-- launched automatically, the order line status gets interfaced, and the
-- delivery is closed.
SELECT * FROM wsh_delivery_legs WHERE delivery_id = 211904;
SELECT * FROM wsh_trip_stops WHERE stop_id IN (216963, 216964);
SELECT * FROM wsh_trips WHERE trip_id = 179789;

-- Inventory Interface - SRS updates inventory with the ship-confirmation
-- information: on-hand balances are decremented and reservations relieved.
-- It always spawns Order Management Interface - SRS, and must complete
-- before it to keep reservation data consistent. It picks rows from
-- MTL_TRANSACTIONS_INTERFACE (MTI) and inserts them into
-- MTL_MATERIAL_TRANSACTIONS_TEMP (MMTT); the Inventory Manager then moves
-- them into MTL_MATERIAL_TRANSACTIONS. Order Management Interface updates
-- the shipped quantities on the order lines. At the end of ship confirm,
-- invoice lines are created in RA_INTERFACE_LINES_ALL; the Workflow
-- Background Process runs, then the Autoinvoice Master program populates
-- the AR tables.

-- Bind :order_number to the order number being traced.
SELECT * FROM ra_interface_lines_all WHERE interface_line_attribute1 = :order_number;

-- AR invoice tables
SELECT * FROM ra_customer_trx_lines_all WHERE interface_line_attribute1 = :order_number;

-- NOTE(review): the original filtered RA_CUSTOMER_TRX_ALL on
-- "trx_header_id", which is not a column of that table; the key is
-- CUSTOMER_TRX_ID (fetched from RA_CUSTOMER_TRX_LINES_ALL).
SELECT * FROM ra_customer_trx_all WHERE customer_trx_id = :customer_trx_id;

-- AR receipt tables
SELECT * FROM ar_cash_receipts_all;
SELECT * FROM ar_receivable_applications;

-- Transfer to GL: run the General Ledger Transfer program to move the
-- accounting into the interface table.
SELECT * FROM gl_interface;

-- Journal Import: imports GL_INTERFACE data into GL_JE_BATCHES,
-- GL_JE_HEADERS and GL_JE_LINES (unposted at this point).

-- Posting: once the GL batch is posted, balances are finally available in:
SELECT * FROM gl_balances;




Order to Cash Cycle - Tables get Affected @ Each Step


1) Order Entry
------------------------------------
oe_order_headers_all.flow_status_code = 'ENTERED'
oe_order_headers_all.booked_flag = 'N'

oe_order_lines_all.flow_status_code = 'ENTERED'
oe_order_lines_all.booked_flag = 'N'
oe_order_lines_all.open_flag = 'Y'

2) Order Booking
------------------------------------
oe_order_headers_all.flow_status_code = 'BOOKED'
oe_order_headers_all.booked_flag = 'Y'

oe_order_lines_all.flow_status_code = 'AWAITING SHIPPING'
oe_order_lines_all.booked_flag = 'Y'

wsh_delivery_details.released_status = 'R' (ready to release)

wsh_delivery_assignments.delivery_id = BLANK

3) Reservation
------------------------------------
mtl_demand

mtl_reservations

4) Pick Release
------------------------------------
wsh_new_deliveries.status_code = 'OP'

wsh_delivery_assignments.delivery_id gets assigned

wsh_delivery_details.released_status = 'S' (submitted for release)

mtl_txn_request_headers

mtl_txn_request_lines

mtl_material_transactions_temp

5) Pick Confirm
------------------------------------
oe_order_lines_all.flow_status_code = 'PICKED'

mtl_material_transactions

wsh_delivery_details.released_status = 'Y' (Released)

mtl_onhand_quantities

6) Ship Confirm
------------------------------------
Data will be removed from wsh_new_deliveries

oe_order_lines_all.flow_status_code = 'SHIPPED'

wsh_delivery_details.released_status = 'C' (Shipped)

wsh_serial_numbers

data will be deleted from mtl_demand and mtl_reservations

item qty gets deducted from mtl_onhand_quantities

7) Enter Invoice
------------------------------------
After shipping the order, the workflow background engine picks up the records and posts them to

ra_interface_lines_all

Auto invoice program picks up records from interface table and insert them into

ra_customer_trx_all (trx_number is invoice number)

ra_customer_trx_lines_all (line_attribute_1 = Order number and line_attribute_6 = Order line id)

8) Complete Line
------------------------------------
oe_order_lines_all.flow_status_code = 'SHIPPED'
oe_order_lines_all.open_flag = 'N'

9) Close Order
------------------------------------
oe_order_headers_all.flow_status_code = 'CLOSED'

oe_order_lines_all.flow_status_code = 'CLOSED'
oe_order_lines_all.open_flag = 'N'

10) Create Receipt
------------------------------------
AR_CASH_RECEIPTS_ALL

11) Transfer to General Ledger
------------------------------------
GL_INTERFACE

12) Journal Import
------------------------------------
GL_JE_BATCHES
GL_JE_HEADERS
GL_JE_LINES

13) Posting
------------------------------------
GL_BALANCES

Accounts Receivable-AR setups in Oracle EBS Functional Financials - Apps R12

   1. Oracle Receivables -> Setup -> System ->System Options     a. Receivable GL related setup    b. Transaction and Customer rela...